1 /* frv simulator support code
2 Copyright (C) 1998, 1999, 2000, 2001, 2003 Free Software Foundation, Inc.
3 Contributed by Red Hat.
5 This file is part of the GNU simulators.
7 This program is free software; you can redistribute it and/or modify
8 it under the terms of the GNU General Public License as published by
9 the Free Software Foundation; either version 2, or (at your option)
12 This program is distributed in the hope that it will be useful,
13 but WITHOUT ANY WARRANTY; without even the implied warranty of
14 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
15 GNU General Public License for more details.
17 You should have received a copy of the GNU General Public License along
18 with this program; if not, write to the Free Software Foundation, Inc.,
19 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. */
22 #define WANT_CPU_FRVBF
27 #include "cgen-engine.h"
/* Maintain a flag in order to know when to write the address of the next
   VLIW instruction into the LR register.  Used by JMPL, JMPIL, and CALL
   insns.  */
int frvbf_write_next_vliw_addr_to_LR;
37 /* The contents of BUF are in target byte order. */
39 frvbf_fetch_register (SIM_CPU
*current_cpu
, int rn
, unsigned char *buf
, int len
)
41 if (rn
<= GR_REGNUM_MAX
)
42 SETTSI (buf
, GET_H_GR (rn
));
43 else if (rn
<= FR_REGNUM_MAX
)
44 SETTSI (buf
, GET_H_FR (rn
- GR_REGNUM_MAX
- 1));
45 else if (rn
== PC_REGNUM
)
46 SETTSI (buf
, GET_H_PC ());
47 else if (rn
== LR_REGNUM
)
48 SETTSI (buf
, GET_H_SPR (H_SPR_LR
));
50 SETTSI (buf
, 0xdeadbeef);
55 /* The contents of BUF are in target byte order. */
58 frvbf_store_register (SIM_CPU
*current_cpu
, int rn
, unsigned char *buf
, int len
)
60 if (rn
<= GR_REGNUM_MAX
)
61 SET_H_GR (rn
, GETTSI (buf
));
62 else if (rn
<= FR_REGNUM_MAX
)
63 SET_H_FR (rn
- GR_REGNUM_MAX
- 1, GETTSI (buf
));
64 else if (rn
== PC_REGNUM
)
65 SET_H_PC (GETTSI (buf
));
66 else if (rn
== LR_REGNUM
)
67 SET_H_SPR (H_SPR_LR
, GETTSI (buf
));
72 /* Cover fns to access the general registers. */
74 frvbf_h_gr_get_handler (SIM_CPU
*current_cpu
, UINT gr
)
76 frv_check_gr_access (current_cpu
, gr
);
77 return CPU (h_gr
[gr
]);
81 frvbf_h_gr_set_handler (SIM_CPU
*current_cpu
, UINT gr
, USI newval
)
83 frv_check_gr_access (current_cpu
, gr
);
86 return; /* Storing into gr0 has no effect. */
88 CPU (h_gr
[gr
]) = newval
;
91 /* Cover fns to access the floating point registers. */
93 frvbf_h_fr_get_handler (SIM_CPU
*current_cpu
, UINT fr
)
95 frv_check_fr_access (current_cpu
, fr
);
96 return CPU (h_fr
[fr
]);
100 frvbf_h_fr_set_handler (SIM_CPU
*current_cpu
, UINT fr
, SF newval
)
102 frv_check_fr_access (current_cpu
, fr
);
103 CPU (h_fr
[fr
]) = newval
;
106 /* Cover fns to access the general registers as double words. */
108 check_register_alignment (SIM_CPU
*current_cpu
, UINT reg
, int align_mask
)
110 if (reg
& align_mask
)
112 SIM_DESC sd
= CPU_STATE (current_cpu
);
113 switch (STATE_ARCHITECTURE (sd
)->mach
)
117 frv_queue_program_interrupt (current_cpu
, FRV_ILLEGAL_INSTRUCTION
);
119 case bfd_mach_frvtomcat
:
122 frv_queue_register_exception_interrupt (current_cpu
,
136 check_fr_register_alignment (SIM_CPU
*current_cpu
, UINT reg
, int align_mask
)
138 if (reg
& align_mask
)
140 SIM_DESC sd
= CPU_STATE (current_cpu
);
141 switch (STATE_ARCHITECTURE (sd
)->mach
)
145 frv_queue_program_interrupt (current_cpu
, FRV_ILLEGAL_INSTRUCTION
);
147 case bfd_mach_frvtomcat
:
151 struct frv_fp_exception_info fp_info
= {
152 FSR_NO_EXCEPTION
, FTT_INVALID_FR
154 frv_queue_fp_exception_interrupt (current_cpu
, & fp_info
);
168 check_memory_alignment (SIM_CPU
*current_cpu
, SI address
, int align_mask
)
170 if (address
& align_mask
)
172 SIM_DESC sd
= CPU_STATE (current_cpu
);
173 switch (STATE_ARCHITECTURE (sd
)->mach
)
176 frv_queue_data_access_error_interrupt (current_cpu
, address
);
178 case bfd_mach_frvtomcat
:
181 frv_queue_mem_address_not_aligned_interrupt (current_cpu
, address
);
187 address
&= ~align_mask
;
194 frvbf_h_gr_double_get_handler (SIM_CPU
*current_cpu
, UINT gr
)
199 return 0; /* gr0 is always 0. */
201 /* Check the register alignment. */
202 gr
= check_register_alignment (current_cpu
, gr
, 1);
204 value
= GET_H_GR (gr
);
206 value
|= (USI
) GET_H_GR (gr
+ 1);
211 frvbf_h_gr_double_set_handler (SIM_CPU
*current_cpu
, UINT gr
, DI newval
)
214 return; /* Storing into gr0 has no effect. */
216 /* Check the register alignment. */
217 gr
= check_register_alignment (current_cpu
, gr
, 1);
219 SET_H_GR (gr
, (newval
>> 32) & 0xffffffff);
220 SET_H_GR (gr
+ 1, (newval
) & 0xffffffff);
223 /* Cover fns to access the floating point register as double words. */
225 frvbf_h_fr_double_get_handler (SIM_CPU
*current_cpu
, UINT fr
)
232 /* Check the register alignment. */
233 fr
= check_fr_register_alignment (current_cpu
, fr
, 1);
235 if (CURRENT_HOST_BYTE_ORDER
== LITTLE_ENDIAN
)
237 value
.as_sf
[1] = GET_H_FR (fr
);
238 value
.as_sf
[0] = GET_H_FR (fr
+ 1);
242 value
.as_sf
[0] = GET_H_FR (fr
);
243 value
.as_sf
[1] = GET_H_FR (fr
+ 1);
250 frvbf_h_fr_double_set_handler (SIM_CPU
*current_cpu
, UINT fr
, DF newval
)
257 /* Check the register alignment. */
258 fr
= check_fr_register_alignment (current_cpu
, fr
, 1);
260 value
.as_df
= newval
;
261 if (CURRENT_HOST_BYTE_ORDER
== LITTLE_ENDIAN
)
263 SET_H_FR (fr
, value
.as_sf
[1]);
264 SET_H_FR (fr
+ 1, value
.as_sf
[0]);
268 SET_H_FR (fr
, value
.as_sf
[0]);
269 SET_H_FR (fr
+ 1, value
.as_sf
[1]);
273 /* Cover fns to access the floating point register as integer words. */
275 frvbf_h_fr_int_get_handler (SIM_CPU
*current_cpu
, UINT fr
)
282 value
.as_sf
= GET_H_FR (fr
);
287 frvbf_h_fr_int_set_handler (SIM_CPU
*current_cpu
, UINT fr
, USI newval
)
294 value
.as_usi
= newval
;
295 SET_H_FR (fr
, value
.as_sf
);
298 /* Cover fns to access the coprocessor registers as double words. */
300 frvbf_h_cpr_double_get_handler (SIM_CPU
*current_cpu
, UINT cpr
)
304 /* Check the register alignment. */
305 cpr
= check_register_alignment (current_cpu
, cpr
, 1);
307 value
= GET_H_CPR (cpr
);
309 value
|= (USI
) GET_H_CPR (cpr
+ 1);
314 frvbf_h_cpr_double_set_handler (SIM_CPU
*current_cpu
, UINT cpr
, DI newval
)
316 /* Check the register alignment. */
317 cpr
= check_register_alignment (current_cpu
, cpr
, 1);
319 SET_H_CPR (cpr
, (newval
>> 32) & 0xffffffff);
320 SET_H_CPR (cpr
+ 1, (newval
) & 0xffffffff);
323 /* Cover fns to write registers as quad words. */
325 frvbf_h_gr_quad_set_handler (SIM_CPU
*current_cpu
, UINT gr
, SI
*newval
)
328 return; /* Storing into gr0 has no effect. */
330 /* Check the register alignment. */
331 gr
= check_register_alignment (current_cpu
, gr
, 3);
333 SET_H_GR (gr
, newval
[0]);
334 SET_H_GR (gr
+ 1, newval
[1]);
335 SET_H_GR (gr
+ 2, newval
[2]);
336 SET_H_GR (gr
+ 3, newval
[3]);
340 frvbf_h_fr_quad_set_handler (SIM_CPU
*current_cpu
, UINT fr
, SI
*newval
)
342 /* Check the register alignment. */
343 fr
= check_fr_register_alignment (current_cpu
, fr
, 3);
345 SET_H_FR (fr
, newval
[0]);
346 SET_H_FR (fr
+ 1, newval
[1]);
347 SET_H_FR (fr
+ 2, newval
[2]);
348 SET_H_FR (fr
+ 3, newval
[3]);
352 frvbf_h_cpr_quad_set_handler (SIM_CPU
*current_cpu
, UINT cpr
, SI
*newval
)
354 /* Check the register alignment. */
355 cpr
= check_register_alignment (current_cpu
, cpr
, 3);
357 SET_H_CPR (cpr
, newval
[0]);
358 SET_H_CPR (cpr
+ 1, newval
[1]);
359 SET_H_CPR (cpr
+ 2, newval
[2]);
360 SET_H_CPR (cpr
+ 3, newval
[3]);
363 /* Cover fns to access the special purpose registers. */
365 frvbf_h_spr_get_handler (SIM_CPU
*current_cpu
, UINT spr
)
367 /* Check access restrictions. */
368 frv_check_spr_read_access (current_cpu
, spr
);
373 return spr_psr_get_handler (current_cpu
);
375 return spr_tbr_get_handler (current_cpu
);
377 return spr_bpsr_get_handler (current_cpu
);
379 return spr_ccr_get_handler (current_cpu
);
381 return spr_cccr_get_handler (current_cpu
);
386 return spr_sr_get_handler (current_cpu
, spr
);
389 return CPU (h_spr
[spr
]);
395 frvbf_h_spr_set_handler (SIM_CPU
*current_cpu
, UINT spr
, USI newval
)
397 FRV_REGISTER_CONTROL
*control
;
401 /* Check access restrictions. */
402 frv_check_spr_write_access (current_cpu
, spr
);
404 /* Only set those fields which are writeable. */
405 control
= CPU_REGISTER_CONTROL (current_cpu
);
406 mask
= control
->spr
[spr
].read_only_mask
;
407 oldval
= GET_H_SPR (spr
);
409 newval
= (newval
& ~mask
) | (oldval
& mask
);
411 /* Some registers are represented by individual components which are
412 referenced more often than the register itself. */
416 spr_psr_set_handler (current_cpu
, newval
);
419 spr_tbr_set_handler (current_cpu
, newval
);
422 spr_bpsr_set_handler (current_cpu
, newval
);
425 spr_ccr_set_handler (current_cpu
, newval
);
428 spr_cccr_set_handler (current_cpu
, newval
);
434 spr_sr_set_handler (current_cpu
, spr
, newval
);
437 frv_cache_reconfigure (current_cpu
, CPU_INSN_CACHE (current_cpu
));
440 CPU (h_spr
[spr
]) = newval
;
445 /* Cover fns to access the gr_hi and gr_lo registers. */
447 frvbf_h_gr_hi_get_handler (SIM_CPU
*current_cpu
, UINT gr
)
449 return (GET_H_GR(gr
) >> 16) & 0xffff;
453 frvbf_h_gr_hi_set_handler (SIM_CPU
*current_cpu
, UINT gr
, UHI newval
)
455 USI value
= (GET_H_GR (gr
) & 0xffff) | (newval
<< 16);
456 SET_H_GR (gr
, value
);
460 frvbf_h_gr_lo_get_handler (SIM_CPU
*current_cpu
, UINT gr
)
462 return GET_H_GR(gr
) & 0xffff;
466 frvbf_h_gr_lo_set_handler (SIM_CPU
*current_cpu
, UINT gr
, UHI newval
)
468 USI value
= (GET_H_GR (gr
) & 0xffff0000) | (newval
& 0xffff);
469 SET_H_GR (gr
, value
);
472 /* Cover fns to access the tbr bits. */
474 spr_tbr_get_handler (SIM_CPU
*current_cpu
)
476 int tbr
= ((GET_H_TBR_TBA () & 0xfffff) << 12) |
477 ((GET_H_TBR_TT () & 0xff) << 4);
483 spr_tbr_set_handler (SIM_CPU
*current_cpu
, USI newval
)
487 SET_H_TBR_TBA ((tbr
>> 12) & 0xfffff) ;
488 SET_H_TBR_TT ((tbr
>> 4) & 0xff) ;
491 /* Cover fns to access the bpsr bits. */
493 spr_bpsr_get_handler (SIM_CPU
*current_cpu
)
495 int bpsr
= ((GET_H_BPSR_BS () & 0x1) << 12) |
496 ((GET_H_BPSR_BET () & 0x1) );
502 spr_bpsr_set_handler (SIM_CPU
*current_cpu
, USI newval
)
506 SET_H_BPSR_BS ((bpsr
>> 12) & 1);
507 SET_H_BPSR_BET ((bpsr
) & 1);
510 /* Cover fns to access the psr bits. */
512 spr_psr_get_handler (SIM_CPU
*current_cpu
)
514 int psr
= ((GET_H_PSR_IMPLE () & 0xf) << 28) |
515 ((GET_H_PSR_VER () & 0xf) << 24) |
516 ((GET_H_PSR_ICE () & 0x1) << 16) |
517 ((GET_H_PSR_NEM () & 0x1) << 14) |
518 ((GET_H_PSR_CM () & 0x1) << 13) |
519 ((GET_H_PSR_BE () & 0x1) << 12) |
520 ((GET_H_PSR_ESR () & 0x1) << 11) |
521 ((GET_H_PSR_EF () & 0x1) << 8) |
522 ((GET_H_PSR_EM () & 0x1) << 7) |
523 ((GET_H_PSR_PIL () & 0xf) << 3) |
524 ((GET_H_PSR_S () & 0x1) << 2) |
525 ((GET_H_PSR_PS () & 0x1) << 1) |
526 ((GET_H_PSR_ET () & 0x1) );
532 spr_psr_set_handler (SIM_CPU
*current_cpu
, USI newval
)
534 /* The handler for PSR.S references the value of PSR.ESR, so set PSR.S
536 SET_H_PSR_S ((newval
>> 2) & 1);
538 SET_H_PSR_IMPLE ((newval
>> 28) & 0xf);
539 SET_H_PSR_VER ((newval
>> 24) & 0xf);
540 SET_H_PSR_ICE ((newval
>> 16) & 1);
541 SET_H_PSR_NEM ((newval
>> 14) & 1);
542 SET_H_PSR_CM ((newval
>> 13) & 1);
543 SET_H_PSR_BE ((newval
>> 12) & 1);
544 SET_H_PSR_ESR ((newval
>> 11) & 1);
545 SET_H_PSR_EF ((newval
>> 8) & 1);
546 SET_H_PSR_EM ((newval
>> 7) & 1);
547 SET_H_PSR_PIL ((newval
>> 3) & 0xf);
548 SET_H_PSR_PS ((newval
>> 1) & 1);
549 SET_H_PSR_ET ((newval
) & 1);
553 frvbf_h_psr_s_set_handler (SIM_CPU
*current_cpu
, BI newval
)
555 /* If switching from user to supervisor mode, or vice-versa, then switch
556 the supervisor/user context. */
557 int psr_s
= GET_H_PSR_S ();
558 if (psr_s
!= (newval
& 1))
560 frvbf_switch_supervisor_user_context (current_cpu
);
561 CPU (h_psr_s
) = newval
& 1;
565 /* Cover fns to access the ccr bits. */
567 spr_ccr_get_handler (SIM_CPU
*current_cpu
)
569 int ccr
= ((GET_H_ICCR (H_ICCR_ICC3
) & 0xf) << 28) |
570 ((GET_H_ICCR (H_ICCR_ICC2
) & 0xf) << 24) |
571 ((GET_H_ICCR (H_ICCR_ICC1
) & 0xf) << 20) |
572 ((GET_H_ICCR (H_ICCR_ICC0
) & 0xf) << 16) |
573 ((GET_H_FCCR (H_FCCR_FCC3
) & 0xf) << 12) |
574 ((GET_H_FCCR (H_FCCR_FCC2
) & 0xf) << 8) |
575 ((GET_H_FCCR (H_FCCR_FCC1
) & 0xf) << 4) |
576 ((GET_H_FCCR (H_FCCR_FCC0
) & 0xf) );
582 spr_ccr_set_handler (SIM_CPU
*current_cpu
, USI newval
)
586 SET_H_ICCR (H_ICCR_ICC3
, (newval
>> 28) & 0xf);
587 SET_H_ICCR (H_ICCR_ICC2
, (newval
>> 24) & 0xf);
588 SET_H_ICCR (H_ICCR_ICC1
, (newval
>> 20) & 0xf);
589 SET_H_ICCR (H_ICCR_ICC0
, (newval
>> 16) & 0xf);
590 SET_H_FCCR (H_FCCR_FCC3
, (newval
>> 12) & 0xf);
591 SET_H_FCCR (H_FCCR_FCC2
, (newval
>> 8) & 0xf);
592 SET_H_FCCR (H_FCCR_FCC1
, (newval
>> 4) & 0xf);
593 SET_H_FCCR (H_FCCR_FCC0
, (newval
) & 0xf);
597 frvbf_set_icc_for_shift_right (
598 SIM_CPU
*current_cpu
, SI value
, SI shift
, QI icc
601 /* Set the C flag of the given icc to the logical OR of the bits shifted
603 int mask
= (1 << shift
) - 1;
604 if ((value
& mask
) != 0)
611 frvbf_set_icc_for_shift_left (
612 SIM_CPU
*current_cpu
, SI value
, SI shift
, QI icc
615 /* Set the V flag of the given icc to the logical OR of the bits shifted
617 int mask
= ((1 << shift
) - 1) << (32 - shift
);
618 if ((value
& mask
) != 0)
624 /* Cover fns to access the cccr bits. */
626 spr_cccr_get_handler (SIM_CPU
*current_cpu
)
628 int cccr
= ((GET_H_CCCR (H_CCCR_CC7
) & 0x3) << 14) |
629 ((GET_H_CCCR (H_CCCR_CC6
) & 0x3) << 12) |
630 ((GET_H_CCCR (H_CCCR_CC5
) & 0x3) << 10) |
631 ((GET_H_CCCR (H_CCCR_CC4
) & 0x3) << 8) |
632 ((GET_H_CCCR (H_CCCR_CC3
) & 0x3) << 6) |
633 ((GET_H_CCCR (H_CCCR_CC2
) & 0x3) << 4) |
634 ((GET_H_CCCR (H_CCCR_CC1
) & 0x3) << 2) |
635 ((GET_H_CCCR (H_CCCR_CC0
) & 0x3) );
641 spr_cccr_set_handler (SIM_CPU
*current_cpu
, USI newval
)
645 SET_H_CCCR (H_CCCR_CC7
, (newval
>> 14) & 0x3);
646 SET_H_CCCR (H_CCCR_CC6
, (newval
>> 12) & 0x3);
647 SET_H_CCCR (H_CCCR_CC5
, (newval
>> 10) & 0x3);
648 SET_H_CCCR (H_CCCR_CC4
, (newval
>> 8) & 0x3);
649 SET_H_CCCR (H_CCCR_CC3
, (newval
>> 6) & 0x3);
650 SET_H_CCCR (H_CCCR_CC2
, (newval
>> 4) & 0x3);
651 SET_H_CCCR (H_CCCR_CC1
, (newval
>> 2) & 0x3);
652 SET_H_CCCR (H_CCCR_CC0
, (newval
) & 0x3);
655 /* Cover fns to access the sr bits. */
657 spr_sr_get_handler (SIM_CPU
*current_cpu
, UINT spr
)
659 /* If PSR.ESR is not set, then SR0-3 map onto SGR4-7 which will be GR4-7,
660 otherwise the correct mapping of USG4-7 or SGR4-7 will be in SR0-3. */
661 int psr_esr
= GET_H_PSR_ESR ();
663 return GET_H_GR (4 + (spr
- H_SPR_SR0
));
665 return CPU (h_spr
[spr
]);
669 spr_sr_set_handler (SIM_CPU
*current_cpu
, UINT spr
, USI newval
)
671 /* If PSR.ESR is not set, then SR0-3 map onto SGR4-7 which will be GR4-7,
672 otherwise the correct mapping of USG4-7 or SGR4-7 will be in SR0-3. */
673 int psr_esr
= GET_H_PSR_ESR ();
675 SET_H_GR (4 + (spr
- H_SPR_SR0
), newval
);
677 CPU (h_spr
[spr
]) = newval
;
680 /* Switch SR0-SR4 with GR4-GR7 if PSR.ESR is set. */
682 frvbf_switch_supervisor_user_context (SIM_CPU
*current_cpu
)
684 if (GET_H_PSR_ESR ())
686 /* We need to be in supervisor mode to swap the registers. Access the
687 PSR.S directly in order to avoid recursive context switches. */
689 int save_psr_s
= CPU (h_psr_s
);
691 for (i
= 0; i
< 4; ++i
)
694 int spr
= i
+ H_SPR_SR0
;
695 SI tmp
= GET_H_SPR (spr
);
696 SET_H_SPR (spr
, GET_H_GR (gr
));
699 CPU (h_psr_s
) = save_psr_s
;
703 /* Handle load/store of quad registers. */
705 frvbf_load_quad_GR (SIM_CPU
*current_cpu
, PCADDR pc
, SI address
, SI targ_ix
)
710 /* Check memory alignment */
711 address
= check_memory_alignment (current_cpu
, address
, 0xf);
713 /* If we need to count cycles, then the cache operation will be
714 initiated from the model profiling functions.
715 See frvbf_model_.... */
718 CPU_LOAD_ADDRESS (current_cpu
) = address
;
719 CPU_LOAD_LENGTH (current_cpu
) = 16;
723 for (i
= 0; i
< 4; ++i
)
725 value
[i
] = frvbf_read_mem_SI (current_cpu
, pc
, address
);
728 sim_queue_fn_xi_write (current_cpu
, frvbf_h_gr_quad_set_handler
, targ_ix
,
734 frvbf_store_quad_GR (SIM_CPU
*current_cpu
, PCADDR pc
, SI address
, SI src_ix
)
740 /* Check register and memory alignment. */
741 src_ix
= check_register_alignment (current_cpu
, src_ix
, 3);
742 address
= check_memory_alignment (current_cpu
, address
, 0xf);
744 for (i
= 0; i
< 4; ++i
)
746 /* GR0 is always 0. */
750 value
[i
] = GET_H_GR (src_ix
+ i
);
753 if (GET_HSR0_DCE (hsr0
))
754 sim_queue_fn_mem_xi_write (current_cpu
, frvbf_mem_set_XI
, address
, value
);
756 sim_queue_mem_xi_write (current_cpu
, address
, value
);
760 frvbf_load_quad_FRint (SIM_CPU
*current_cpu
, PCADDR pc
, SI address
, SI targ_ix
)
765 /* Check memory alignment */
766 address
= check_memory_alignment (current_cpu
, address
, 0xf);
768 /* If we need to count cycles, then the cache operation will be
769 initiated from the model profiling functions.
770 See frvbf_model_.... */
773 CPU_LOAD_ADDRESS (current_cpu
) = address
;
774 CPU_LOAD_LENGTH (current_cpu
) = 16;
778 for (i
= 0; i
< 4; ++i
)
780 value
[i
] = frvbf_read_mem_SI (current_cpu
, pc
, address
);
783 sim_queue_fn_xi_write (current_cpu
, frvbf_h_fr_quad_set_handler
, targ_ix
,
789 frvbf_store_quad_FRint (SIM_CPU
*current_cpu
, PCADDR pc
, SI address
, SI src_ix
)
795 /* Check register and memory alignment. */
796 src_ix
= check_fr_register_alignment (current_cpu
, src_ix
, 3);
797 address
= check_memory_alignment (current_cpu
, address
, 0xf);
799 for (i
= 0; i
< 4; ++i
)
800 value
[i
] = GET_H_FR (src_ix
+ i
);
803 if (GET_HSR0_DCE (hsr0
))
804 sim_queue_fn_mem_xi_write (current_cpu
, frvbf_mem_set_XI
, address
, value
);
806 sim_queue_mem_xi_write (current_cpu
, address
, value
);
810 frvbf_load_quad_CPR (SIM_CPU
*current_cpu
, PCADDR pc
, SI address
, SI targ_ix
)
815 /* Check memory alignment */
816 address
= check_memory_alignment (current_cpu
, address
, 0xf);
818 /* If we need to count cycles, then the cache operation will be
819 initiated from the model profiling functions.
820 See frvbf_model_.... */
823 CPU_LOAD_ADDRESS (current_cpu
) = address
;
824 CPU_LOAD_LENGTH (current_cpu
) = 16;
828 for (i
= 0; i
< 4; ++i
)
830 value
[i
] = frvbf_read_mem_SI (current_cpu
, pc
, address
);
833 sim_queue_fn_xi_write (current_cpu
, frvbf_h_cpr_quad_set_handler
, targ_ix
,
839 frvbf_store_quad_CPR (SIM_CPU
*current_cpu
, PCADDR pc
, SI address
, SI src_ix
)
845 /* Check register and memory alignment. */
846 src_ix
= check_register_alignment (current_cpu
, src_ix
, 3);
847 address
= check_memory_alignment (current_cpu
, address
, 0xf);
849 for (i
= 0; i
< 4; ++i
)
850 value
[i
] = GET_H_CPR (src_ix
+ i
);
853 if (GET_HSR0_DCE (hsr0
))
854 sim_queue_fn_mem_xi_write (current_cpu
, frvbf_mem_set_XI
, address
, value
);
856 sim_queue_mem_xi_write (current_cpu
, address
, value
);
860 frvbf_signed_integer_divide (
861 SIM_CPU
*current_cpu
, SI arg1
, SI arg2
, int target_index
, int non_excepting
864 enum frv_dtt dtt
= FRV_DTT_NO_EXCEPTION
;
865 if (arg1
== 0x80000000 && arg2
== -1)
867 /* 0x80000000/(-1) must result in 0x7fffffff when ISR.EDE is set
868 otherwise it may result in 0x7fffffff (sparc compatibility) or
869 0x80000000 (C language compatibility). */
871 dtt
= FRV_DTT_OVERFLOW
;
874 if (GET_ISR_EDE (isr
))
875 sim_queue_fn_si_write (current_cpu
, frvbf_h_gr_set
, target_index
,
878 sim_queue_fn_si_write (current_cpu
, frvbf_h_gr_set
, target_index
,
880 frvbf_force_update (current_cpu
); /* Force update of target register. */
883 dtt
= FRV_DTT_DIVISION_BY_ZERO
;
885 sim_queue_fn_si_write (current_cpu
, frvbf_h_gr_set
, target_index
,
888 /* Check for exceptions. */
889 if (dtt
!= FRV_DTT_NO_EXCEPTION
)
890 dtt
= frvbf_division_exception (current_cpu
, dtt
, target_index
,
892 if (non_excepting
&& dtt
== FRV_DTT_NO_EXCEPTION
)
894 /* Non excepting instruction. Clear the NE flag for the target
897 GET_NE_FLAGS (NE_flags
, H_SPR_GNER0
);
898 CLEAR_NE_FLAG (NE_flags
, target_index
);
899 SET_NE_FLAGS (H_SPR_GNER0
, NE_flags
);
904 frvbf_unsigned_integer_divide (
905 SIM_CPU
*current_cpu
, USI arg1
, USI arg2
, int target_index
, int non_excepting
909 frvbf_division_exception (current_cpu
, FRV_DTT_DIVISION_BY_ZERO
,
910 target_index
, non_excepting
);
913 sim_queue_fn_si_write (current_cpu
, frvbf_h_gr_set
, target_index
,
917 /* Non excepting instruction. Clear the NE flag for the target
920 GET_NE_FLAGS (NE_flags
, H_SPR_GNER0
);
921 CLEAR_NE_FLAG (NE_flags
, target_index
);
922 SET_NE_FLAGS (H_SPR_GNER0
, NE_flags
);
927 /* Clear accumulators. */
929 frvbf_clear_accumulators (SIM_CPU
*current_cpu
, SI acc_ix
, int A
)
931 SIM_DESC sd
= CPU_STATE (current_cpu
);
933 (STATE_ARCHITECTURE (sd
)->mach
== bfd_mach_fr500
) ? 8 :
934 (STATE_ARCHITECTURE (sd
)->mach
== bfd_mach_fr550
) ? 8 :
935 (STATE_ARCHITECTURE (sd
)->mach
== bfd_mach_fr400
) ? 4 :
937 FRV_PROFILE_STATE
*ps
= CPU_PROFILE_STATE (current_cpu
);
939 ps
->mclracc_acc
= acc_ix
;
941 if (A
== 0 || acc_ix
!= 0) /* Clear 1 accumuator? */
943 /* This instruction is a nop if the referenced accumulator is not
945 if (acc_ix
< acc_num
)
946 sim_queue_fn_di_write (current_cpu
, frvbf_h_acc40S_set
, acc_ix
, 0);
950 /* Clear all implemented accumulators. */
952 for (i
= 0; i
< acc_num
; ++i
)
953 sim_queue_fn_di_write (current_cpu
, frvbf_h_acc40S_set
, i
, 0);
957 /* Functions to aid insn semantics. */
959 /* Compute the result of the SCAN and SCANI insns after the shift and xor. */
961 frvbf_scan_result (SIM_CPU
*current_cpu
, SI value
)
969 /* Find the position of the first non-zero bit.
970 The loop will terminate since there is guaranteed to be at least one
972 mask
= 1 << (sizeof (mask
) * 8 - 1);
973 for (i
= 0; (value
& mask
) == 0; ++i
)
979 /* Compute the result of the cut insns. */
981 frvbf_cut (SIM_CPU
*current_cpu
, SI reg1
, SI reg2
, SI cut_point
)
986 result
= reg1
<< cut_point
;
987 result
|= (reg2
>> (32 - cut_point
)) & ((1 << cut_point
) - 1);
990 result
= reg2
<< (cut_point
- 32);
995 /* Compute the result of the cut insns. */
997 frvbf_media_cut (SIM_CPU
*current_cpu
, DI acc
, SI cut_point
)
999 /* The cut point is the lower 6 bits (signed) of what we are passed. */
1000 cut_point
= cut_point
<< 26 >> 26;
1002 /* The cut_point is relative to bit 40 of 64 bits. */
1004 return (acc
<< (cut_point
+ 24)) >> 32;
1006 /* Extend the sign bit (bit 40) for negative cuts. */
1007 if (cut_point
== -32)
1008 return (acc
<< 24) >> 63; /* Special case for full shiftout. */
1010 return (acc
<< 24) >> (32 + -cut_point
);
1013 /* Compute the result of the cut insns. */
1015 frvbf_media_cut_ss (SIM_CPU
*current_cpu
, DI acc
, SI cut_point
)
1017 /* The cut point is the lower 6 bits (signed) of what we are passed. */
1018 cut_point
= cut_point
<< 26 >> 26;
1022 /* The cut_point is relative to bit 40 of 64 bits. */
1023 DI shifted
= acc
<< (cut_point
+ 24);
1024 DI unshifted
= shifted
>> (cut_point
+ 24);
1026 /* The result will be saturated if significant bits are shifted out. */
1027 if (unshifted
!= acc
)
1035 /* The result will not be saturated, so use the code for the normal cut. */
1036 return frvbf_media_cut (current_cpu
, acc
, cut_point
);
1039 /* Compute the result of int accumulator cut (SCUTSS). */
1041 frvbf_iacc_cut (SIM_CPU
*current_cpu
, DI acc
, SI cut_point
)
1043 /* The cut point is the lower 6 bits (signed) of what we are passed. */
1044 cut_point
= cut_point
<< 25 >> 25;
1046 if (cut_point
<= -32)
1047 cut_point
= -31; /* Special case for full shiftout. */
1049 /* Negative cuts (cannot saturate). */
1051 return acc
>> (32 + -cut_point
);
1053 /* Positive cuts will saturate if significant bits are shifted out. */
1054 if (acc
!= ((acc
<< cut_point
) >> cut_point
))
1060 /* No saturate, just cut. */
1061 return ((acc
<< cut_point
) >> 32);
1064 /* Compute the result of shift-left-arithmetic-with-saturation (SLASS). */
1066 frvbf_shift_left_arith_saturate (SIM_CPU
*current_cpu
, SI arg1
, SI arg2
)
1070 /* FIXME: what to do with negative shift amt? */
1077 /* Signed shift by 31 or greater saturates by definition. */
1080 return (SI
) 0x7fffffff;
1082 return (SI
) 0x80000000;
1084 /* OK, arg2 is between 1 and 31. */
1085 neg_arg1
= (arg1
< 0);
1088 /* Check for sign bit change (saturation). */
1089 if (neg_arg1
&& (arg1
>= 0))
1090 return (SI
) 0x80000000;
1091 else if (!neg_arg1
&& (arg1
< 0))
1092 return (SI
) 0x7fffffff;
1093 } while (--arg2
> 0);
1098 /* Simulate the media custom insns. */
1100 frvbf_media_cop (SIM_CPU
*current_cpu
, int cop_num
)
1102 /* The semantics of the insn are a nop, since it is implementation defined.
1103 We do need to check whether it's implemented and set up for MTRAP
1105 USI msr0
= GET_MSR (0);
1106 if (GET_MSR_EMCI (msr0
) == 0)
1108 /* no interrupt queued at this time. */
1109 frv_set_mp_exception_registers (current_cpu
, MTT_UNIMPLEMENTED_MPOP
, 0);
1113 /* Simulate the media average (MAVEH) insn. */
1115 do_media_average (SIM_CPU
*current_cpu
, HI arg1
, HI arg2
)
1117 SIM_DESC sd
= CPU_STATE (current_cpu
);
1118 SI sum
= (arg1
+ arg2
);
1119 HI result
= sum
>> 1;
1122 /* On fr400 and fr550, check the rounding mode. On other machines rounding is always
1123 toward negative infinity and the result is already correctly rounded. */
1124 switch (STATE_ARCHITECTURE (sd
)->mach
)
1126 /* Need to check rounding mode. */
1127 case bfd_mach_fr400
:
1128 case bfd_mach_fr550
:
1129 /* Check whether rounding will be required. Rounding will be required
1130 if the sum is an odd number. */
1131 rounding_value
= sum
& 1;
1134 USI msr0
= GET_MSR (0);
1135 /* Check MSR0.SRDAV to determine which bits control the rounding. */
1136 if (GET_MSR_SRDAV (msr0
))
1138 /* MSR0.RD controls rounding. */
1139 switch (GET_MSR_RD (msr0
))
1142 /* Round to nearest. */
1147 /* Round toward 0. */
1152 /* Round toward positive infinity. */
1156 /* Round toward negative infinity. The result is already
1157 correctly rounded. */
1166 /* MSR0.RDAV controls rounding. If set, round toward positive
1167 infinity. Otherwise the result is already rounded correctly
1168 toward negative infinity. */
1169 if (GET_MSR_RDAV (msr0
))
1182 frvbf_media_average (SIM_CPU
*current_cpu
, SI reg1
, SI reg2
)
1185 result
= do_media_average (current_cpu
, reg1
& 0xffff, reg2
& 0xffff);
1187 result
|= do_media_average (current_cpu
, (reg1
>> 16) & 0xffff,
1188 (reg2
>> 16) & 0xffff) << 16;
1192 /* Maintain a flag in order to know when to write the address of the next
1193 VLIW instruction into the LR register. Used by JMPL. JMPIL, and CALL. */
1195 frvbf_set_write_next_vliw_addr_to_LR (SIM_CPU
*current_cpu
, int value
)
1197 frvbf_write_next_vliw_addr_to_LR
= value
;
1201 frvbf_set_ne_index (SIM_CPU
*current_cpu
, int index
)
1205 /* Save the target register so interrupt processing can set its NE flag
1206 in the event of an exception. */
1207 frv_interrupt_state
.ne_index
= index
;
1209 /* Clear the NE flag of the target register. It will be reset if necessary
1210 in the event of an exception. */
1211 GET_NE_FLAGS (NE_flags
, H_SPR_FNER0
);
1212 CLEAR_NE_FLAG (NE_flags
, index
);
1213 SET_NE_FLAGS (H_SPR_FNER0
, NE_flags
);
1217 frvbf_force_update (SIM_CPU
*current_cpu
)
1219 CGEN_WRITE_QUEUE
*q
= CPU_WRITE_QUEUE (current_cpu
);
1220 int ix
= CGEN_WRITE_QUEUE_INDEX (q
);
1223 CGEN_WRITE_QUEUE_ELEMENT
*item
= CGEN_WRITE_QUEUE_ELEMENT (q
, ix
- 1);
1224 item
->flags
|= FRV_WRITE_QUEUE_FORCE_WRITE
;
1228 /* Condition code logic. */
1230 andcr
, orcr
, xorcr
, nandcr
, norcr
, andncr
, orncr
, nandncr
, norncr
,
1234 enum cr_result
{cr_undefined
, cr_undefined1
, cr_false
, cr_true
};
1236 static enum cr_result
1237 cr_logic
[num_cr_ops
][4][4] = {
1240 /* undefined undefined false true */
1241 /* undefined */ {cr_undefined
, cr_undefined
, cr_undefined
, cr_undefined
},
1242 /* undefined */ {cr_undefined
, cr_undefined
, cr_undefined
, cr_undefined
},
1243 /* false */ {cr_undefined
, cr_undefined
, cr_undefined
, cr_undefined
},
1244 /* true */ {cr_undefined
, cr_undefined
, cr_false
, cr_true
}
1248 /* undefined undefined false true */
1249 /* undefined */ {cr_undefined
, cr_undefined
, cr_false
, cr_true
},
1250 /* undefined */ {cr_undefined
, cr_undefined
, cr_false
, cr_true
},
1251 /* false */ {cr_false
, cr_false
, cr_false
, cr_true
},
1252 /* true */ {cr_true
, cr_true
, cr_true
, cr_true
}
1256 /* undefined undefined false true */
1257 /* undefined */ {cr_undefined
, cr_undefined
, cr_undefined
, cr_undefined
},
1258 /* undefined */ {cr_undefined
, cr_undefined
, cr_undefined
, cr_undefined
},
1259 /* false */ {cr_undefined
, cr_undefined
, cr_false
, cr_true
},
1260 /* true */ {cr_true
, cr_true
, cr_true
, cr_false
}
1264 /* undefined undefined false true */
1265 /* undefined */ {cr_undefined
, cr_undefined
, cr_undefined
, cr_undefined
},
1266 /* undefined */ {cr_undefined
, cr_undefined
, cr_undefined
, cr_undefined
},
1267 /* false */ {cr_undefined
, cr_undefined
, cr_undefined
, cr_undefined
},
1268 /* true */ {cr_undefined
, cr_undefined
, cr_true
, cr_false
}
1272 /* undefined undefined false true */
1273 /* undefined */ {cr_undefined
, cr_undefined
, cr_true
, cr_false
},
1274 /* undefined */ {cr_undefined
, cr_undefined
, cr_true
, cr_false
},
1275 /* false */ {cr_true
, cr_true
, cr_true
, cr_false
},
1276 /* true */ {cr_false
, cr_false
, cr_false
, cr_false
}
1280 /* undefined undefined false true */
1281 /* undefined */ {cr_undefined
, cr_undefined
, cr_undefined
, cr_undefined
},
1282 /* undefined */ {cr_undefined
, cr_undefined
, cr_undefined
, cr_undefined
},
1283 /* false */ {cr_undefined
, cr_undefined
, cr_false
, cr_true
},
1284 /* true */ {cr_undefined
, cr_undefined
, cr_undefined
, cr_undefined
}
1288 /* undefined undefined false true */
1289 /* undefined */ {cr_undefined
, cr_undefined
, cr_false
, cr_true
},
1290 /* undefined */ {cr_undefined
, cr_undefined
, cr_false
, cr_true
},
1291 /* false */ {cr_true
, cr_true
, cr_true
, cr_true
},
1292 /* true */ {cr_false
, cr_false
, cr_false
, cr_true
}
1296 /* undefined undefined false true */
1297 /* undefined */ {cr_undefined
, cr_undefined
, cr_undefined
, cr_undefined
},
1298 /* undefined */ {cr_undefined
, cr_undefined
, cr_undefined
, cr_undefined
},
1299 /* false */ {cr_undefined
, cr_undefined
, cr_true
, cr_false
},
1300 /* true */ {cr_undefined
, cr_undefined
, cr_undefined
, cr_undefined
}
1304 /* undefined undefined false true */
1305 /* undefined */ {cr_undefined
, cr_undefined
, cr_true
, cr_false
},
1306 /* undefined */ {cr_undefined
, cr_undefined
, cr_true
, cr_false
},
1307 /* false */ {cr_false
, cr_false
, cr_false
, cr_false
},
1308 /* true */ {cr_true
, cr_true
, cr_true
, cr_false
}
1313 frvbf_cr_logic (SIM_CPU
*current_cpu
, SI operation
, UQI arg1
, UQI arg2
)
1315 return cr_logic
[operation
][arg1
][arg2
];
1318 /* Cache Manipulation. */
1320 frvbf_insn_cache_preload (SIM_CPU
*current_cpu
, SI address
, USI length
, int lock
)
1322 /* If we need to count cycles, then the cache operation will be
1323 initiated from the model profiling functions.
1324 See frvbf_model_.... */
1325 int hsr0
= GET_HSR0 ();
1326 if (GET_HSR0_ICE (hsr0
))
1330 CPU_LOAD_ADDRESS (current_cpu
) = address
;
1331 CPU_LOAD_LENGTH (current_cpu
) = length
;
1332 CPU_LOAD_LOCK (current_cpu
) = lock
;
1336 FRV_CACHE
*cache
= CPU_INSN_CACHE (current_cpu
);
1337 frv_cache_preload (cache
, address
, length
, lock
);
1343 frvbf_data_cache_preload (SIM_CPU
*current_cpu
, SI address
, USI length
, int lock
)
1345 /* If we need to count cycles, then the cache operation will be
1346 initiated from the model profiling functions.
1347 See frvbf_model_.... */
1348 int hsr0
= GET_HSR0 ();
1349 if (GET_HSR0_DCE (hsr0
))
1353 CPU_LOAD_ADDRESS (current_cpu
) = address
;
1354 CPU_LOAD_LENGTH (current_cpu
) = length
;
1355 CPU_LOAD_LOCK (current_cpu
) = lock
;
1359 FRV_CACHE
*cache
= CPU_DATA_CACHE (current_cpu
);
1360 frv_cache_preload (cache
, address
, length
, lock
);
1366 frvbf_insn_cache_unlock (SIM_CPU
*current_cpu
, SI address
)
1368 /* If we need to count cycles, then the cache operation will be
1369 initiated from the model profiling functions.
1370 See frvbf_model_.... */
1371 int hsr0
= GET_HSR0 ();
1372 if (GET_HSR0_ICE (hsr0
))
1375 CPU_LOAD_ADDRESS (current_cpu
) = address
;
1378 FRV_CACHE
*cache
= CPU_INSN_CACHE (current_cpu
);
1379 frv_cache_unlock (cache
, address
);
1385 frvbf_data_cache_unlock (SIM_CPU
*current_cpu
, SI address
)
1387 /* If we need to count cycles, then the cache operation will be
1388 initiated from the model profiling functions.
1389 See frvbf_model_.... */
1390 int hsr0
= GET_HSR0 ();
1391 if (GET_HSR0_DCE (hsr0
))
1394 CPU_LOAD_ADDRESS (current_cpu
) = address
;
1397 FRV_CACHE
*cache
= CPU_DATA_CACHE (current_cpu
);
1398 frv_cache_unlock (cache
, address
);
1404 frvbf_insn_cache_invalidate (SIM_CPU
*current_cpu
, SI address
, int all
)
1406 /* Make sure the insn was specified properly. -1 will be passed for ALL
1407 for a icei with A=0. */
1410 frv_queue_program_interrupt (current_cpu
, FRV_ILLEGAL_INSTRUCTION
);
1414 /* If we need to count cycles, then the cache operation will be
1415 initiated from the model profiling functions.
1416 See frvbf_model_.... */
1419 /* Record the all-entries flag for use in profiling. */
1420 FRV_PROFILE_STATE
*ps
= CPU_PROFILE_STATE (current_cpu
);
1421 ps
->all_cache_entries
= all
;
1422 CPU_LOAD_ADDRESS (current_cpu
) = address
;
1426 FRV_CACHE
*cache
= CPU_INSN_CACHE (current_cpu
);
1428 frv_cache_invalidate_all (cache
, 0/* flush? */);
1430 frv_cache_invalidate (cache
, address
, 0/* flush? */);
1435 frvbf_data_cache_invalidate (SIM_CPU
*current_cpu
, SI address
, int all
)
1437 /* Make sure the insn was specified properly. -1 will be passed for ALL
1438 for a dcei with A=0. */
1441 frv_queue_program_interrupt (current_cpu
, FRV_ILLEGAL_INSTRUCTION
);
1445 /* If we need to count cycles, then the cache operation will be
1446 initiated from the model profiling functions.
1447 See frvbf_model_.... */
1450 /* Record the all-entries flag for use in profiling. */
1451 FRV_PROFILE_STATE
*ps
= CPU_PROFILE_STATE (current_cpu
);
1452 ps
->all_cache_entries
= all
;
1453 CPU_LOAD_ADDRESS (current_cpu
) = address
;
1457 FRV_CACHE
*cache
= CPU_DATA_CACHE (current_cpu
);
1459 frv_cache_invalidate_all (cache
, 0/* flush? */);
1461 frv_cache_invalidate (cache
, address
, 0/* flush? */);
1466 frvbf_data_cache_flush (SIM_CPU
*current_cpu
, SI address
, int all
)
1468 /* Make sure the insn was specified properly. -1 will be passed for ALL
1469 for a dcef with A=0. */
1472 frv_queue_program_interrupt (current_cpu
, FRV_ILLEGAL_INSTRUCTION
);
1476 /* If we need to count cycles, then the cache operation will be
1477 initiated from the model profiling functions.
1478 See frvbf_model_.... */
1481 /* Record the all-entries flag for use in profiling. */
1482 FRV_PROFILE_STATE
*ps
= CPU_PROFILE_STATE (current_cpu
);
1483 ps
->all_cache_entries
= all
;
1484 CPU_LOAD_ADDRESS (current_cpu
) = address
;
1488 FRV_CACHE
*cache
= CPU_DATA_CACHE (current_cpu
);
1490 frv_cache_invalidate_all (cache
, 1/* flush? */);
1492 frv_cache_invalidate (cache
, address
, 1/* flush? */);