1 /* frv simulator machine independent profiling code.
3 Copyright (C) 1998, 1999, 2000, 2001, 2003, 2007
4 Free Software Foundation, Inc.
7 This file is part of the GNU simulators.
9 This program is free software; you can redistribute it and/or modify
10 it under the terms of the GNU General Public License as published by
11 the Free Software Foundation; either version 2, or (at your option)
14 This program is distributed in the hope that it will be useful,
15 but WITHOUT ANY WARRANTY; without even the implied warranty of
16 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
17 GNU General Public License for more details.
19 You should have received a copy of the GNU General Public License along
20 with this program; if not, write to the Free Software Foundation, Inc.,
21 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
25 #define WANT_CPU_FRVBF
30 #if WITH_PROFILE_MODEL_P
33 #include "profile-fr400.h"
34 #include "profile-fr500.h"
35 #include "profile-fr550.h"
38 reset_gr_flags (SIM_CPU
*cpu
, INT gr
)
40 SIM_DESC sd
= CPU_STATE (cpu
);
41 if (STATE_ARCHITECTURE (sd
)->mach
== bfd_mach_fr400
42 || STATE_ARCHITECTURE (sd
)->mach
== bfd_mach_fr450
)
43 fr400_reset_gr_flags (cpu
, gr
);
44 /* Other machines have no gr flags right now. */
48 reset_fr_flags (SIM_CPU
*cpu
, INT fr
)
50 SIM_DESC sd
= CPU_STATE (cpu
);
51 if (STATE_ARCHITECTURE (sd
)->mach
== bfd_mach_fr400
52 || STATE_ARCHITECTURE (sd
)->mach
== bfd_mach_fr450
)
53 fr400_reset_fr_flags (cpu
, fr
);
54 else if (STATE_ARCHITECTURE (sd
)->mach
== bfd_mach_fr500
)
55 fr500_reset_fr_flags (cpu
, fr
);
59 reset_acc_flags (SIM_CPU
*cpu
, INT acc
)
61 SIM_DESC sd
= CPU_STATE (cpu
);
62 if (STATE_ARCHITECTURE (sd
)->mach
== bfd_mach_fr400
63 || STATE_ARCHITECTURE (sd
)->mach
== bfd_mach_fr450
)
64 fr400_reset_acc_flags (cpu
, acc
);
65 /* Other machines have no acc flags right now. */
69 reset_cc_flags (SIM_CPU
*cpu
, INT cc
)
71 SIM_DESC sd
= CPU_STATE (cpu
);
72 if (STATE_ARCHITECTURE (sd
)->mach
== bfd_mach_fr500
)
73 fr500_reset_cc_flags (cpu
, cc
);
74 /* Other machines have no cc flags. */
78 set_use_is_gr_complex (SIM_CPU
*cpu
, INT gr
)
82 FRV_PROFILE_STATE
*ps
= CPU_PROFILE_STATE (cpu
);
83 reset_gr_flags (cpu
, gr
);
84 ps
->cur_gr_complex
|= (((DI
)1) << gr
);
89 set_use_not_gr_complex (SIM_CPU
*cpu
, INT gr
)
93 FRV_PROFILE_STATE
*ps
= CPU_PROFILE_STATE (cpu
);
94 ps
->cur_gr_complex
&= ~(((DI
)1) << gr
);
99 use_is_gr_complex (SIM_CPU
*cpu
, INT gr
)
103 FRV_PROFILE_STATE
*ps
= CPU_PROFILE_STATE (cpu
);
104 return ps
->cur_gr_complex
& (((DI
)1) << gr
);
109 /* Globals flag indicates whether this insn is being modeled. */
110 enum FRV_INSN_MODELING model_insn
= FRV_INSN_NO_MODELING
;
112 /* static buffer for the name of the currently most restrictive hazard. */
113 static char hazard_name
[100] = "";
115 /* Print information about the wait applied to an entire VLIW insn. */
116 FRV_INSN_FETCH_BUFFER frv_insn_fetch_buffer
[]
118 {1, NO_REQNO
}, {1, NO_REQNO
} /* init with impossible address. */
130 /* A queue of load requests from the data cache. Use to keep track of loads
131 which are still pending. */
132 /* TODO -- some of these are mutually exclusive and can use a union. */
147 enum cache_request request
;
148 } CACHE_QUEUE_ELEMENT
;
150 #define CACHE_QUEUE_SIZE 64 /* TODO -- make queue dynamic */
155 CACHE_QUEUE_ELEMENT q
[CACHE_QUEUE_SIZE
];
156 } cache_queue
= {0, 0};
158 /* Queue a request for a load from the cache. The load will be queued as
159 'inactive' and will be requested after the given number
160 of cycles have passed from the point the load is activated. */
162 request_cache_load (SIM_CPU
*cpu
, INT regnum
, int regtype
, int cycles
)
164 CACHE_QUEUE_ELEMENT
*q
;
168 /* For a conditional load which was not executed, CPU_LOAD_LENGTH will be
170 if (CPU_LOAD_LENGTH (cpu
) == 0)
173 if (cache_queue
.ix
>= CACHE_QUEUE_SIZE
)
174 abort (); /* TODO: Make the queue dynamic */
176 q
= & cache_queue
.q
[cache_queue
.ix
];
179 q
->reqno
= cache_queue
.reqno
++;
180 q
->request
= cache_load
;
181 q
->cache
= CPU_DATA_CACHE (cpu
);
182 q
->address
= CPU_LOAD_ADDRESS (cpu
);
183 q
->length
= CPU_LOAD_LENGTH (cpu
);
184 q
->is_signed
= CPU_LOAD_SIGNED (cpu
);
186 q
->regtype
= regtype
;
190 vliw
= CPU_VLIW (cpu
);
191 slot
= vliw
->next_slot
- 1;
192 q
->slot
= (*vliw
->current_vliw
)[slot
];
194 CPU_LOAD_LENGTH (cpu
) = 0;
197 /* Queue a request to flush the cache. The request will be queued as
198 'inactive' and will be requested after the given number
199 of cycles have passed from the point the request is activated. */
201 request_cache_flush (SIM_CPU
*cpu
, FRV_CACHE
*cache
, int cycles
)
203 CACHE_QUEUE_ELEMENT
*q
;
207 if (cache_queue
.ix
>= CACHE_QUEUE_SIZE
)
208 abort (); /* TODO: Make the queue dynamic */
210 q
= & cache_queue
.q
[cache_queue
.ix
];
213 q
->reqno
= cache_queue
.reqno
++;
214 q
->request
= cache_flush
;
216 q
->address
= CPU_LOAD_ADDRESS (cpu
);
217 q
->all
= CPU_PROFILE_STATE (cpu
)->all_cache_entries
;
221 vliw
= CPU_VLIW (cpu
);
222 slot
= vliw
->next_slot
- 1;
223 q
->slot
= (*vliw
->current_vliw
)[slot
];
226 /* Queue a request to invalidate the cache. The request will be queued as
227 'inactive' and will be requested after the given number
228 of cycles have passed from the point the request is activated. */
230 request_cache_invalidate (SIM_CPU
*cpu
, FRV_CACHE
*cache
, int cycles
)
232 CACHE_QUEUE_ELEMENT
*q
;
236 if (cache_queue
.ix
>= CACHE_QUEUE_SIZE
)
237 abort (); /* TODO: Make the queue dynamic */
239 q
= & cache_queue
.q
[cache_queue
.ix
];
242 q
->reqno
= cache_queue
.reqno
++;
243 q
->request
= cache_invalidate
;
245 q
->address
= CPU_LOAD_ADDRESS (cpu
);
246 q
->all
= CPU_PROFILE_STATE (cpu
)->all_cache_entries
;
250 vliw
= CPU_VLIW (cpu
);
251 slot
= vliw
->next_slot
- 1;
252 q
->slot
= (*vliw
->current_vliw
)[slot
];
255 /* Queue a request to preload the cache. The request will be queued as
256 'inactive' and will be requested after the given number
257 of cycles have passed from the point the request is activated. */
259 request_cache_preload (SIM_CPU
*cpu
, FRV_CACHE
*cache
, int cycles
)
261 CACHE_QUEUE_ELEMENT
*q
;
265 if (cache_queue
.ix
>= CACHE_QUEUE_SIZE
)
266 abort (); /* TODO: Make the queue dynamic */
268 q
= & cache_queue
.q
[cache_queue
.ix
];
271 q
->reqno
= cache_queue
.reqno
++;
272 q
->request
= cache_preload
;
274 q
->address
= CPU_LOAD_ADDRESS (cpu
);
275 q
->length
= CPU_LOAD_LENGTH (cpu
);
276 q
->lock
= CPU_LOAD_LOCK (cpu
);
280 vliw
= CPU_VLIW (cpu
);
281 slot
= vliw
->next_slot
- 1;
282 q
->slot
= (*vliw
->current_vliw
)[slot
];
284 CPU_LOAD_LENGTH (cpu
) = 0;
287 /* Queue a request to unlock the cache. The request will be queued as
288 'inactive' and will be requested after the given number
289 of cycles have passed from the point the request is activated. */
291 request_cache_unlock (SIM_CPU
*cpu
, FRV_CACHE
*cache
, int cycles
)
293 CACHE_QUEUE_ELEMENT
*q
;
297 if (cache_queue
.ix
>= CACHE_QUEUE_SIZE
)
298 abort (); /* TODO: Make the queue dynamic */
300 q
= & cache_queue
.q
[cache_queue
.ix
];
303 q
->reqno
= cache_queue
.reqno
++;
304 q
->request
= cache_unlock
;
306 q
->address
= CPU_LOAD_ADDRESS (cpu
);
310 vliw
= CPU_VLIW (cpu
);
311 slot
= vliw
->next_slot
- 1;
312 q
->slot
= (*vliw
->current_vliw
)[slot
];
316 submit_cache_request (CACHE_QUEUE_ELEMENT
*q
)
321 frv_cache_request_load (q
->cache
, q
->reqno
, q
->address
, q
->slot
);
324 frv_cache_request_invalidate (q
->cache
, q
->reqno
, q
->address
, q
->slot
,
327 case cache_invalidate
:
328 frv_cache_request_invalidate (q
->cache
, q
->reqno
, q
->address
, q
->slot
,
332 frv_cache_request_preload (q
->cache
, q
->address
, q
->slot
,
336 frv_cache_request_unlock (q
->cache
, q
->address
, q
->slot
);
343 /* Activate all inactive load requests. */
345 activate_cache_requests (SIM_CPU
*cpu
)
348 for (i
= 0; i
< cache_queue
.ix
; ++i
)
350 CACHE_QUEUE_ELEMENT
*q
= & cache_queue
.q
[i
];
354 /* Submit the request now if the cycle count is zero. */
356 submit_cache_request (q
);
361 /* Check to see if a load is pending which affects the given register(s).
364 load_pending_for_register (SIM_CPU
*cpu
, int regnum
, int words
, int regtype
)
367 for (i
= 0; i
< cache_queue
.ix
; ++i
)
369 CACHE_QUEUE_ELEMENT
*q
= & cache_queue
.q
[i
];
371 /* Must be the same kind of register. */
372 if (! q
->active
|| q
->request
!= cache_load
|| q
->regtype
!= regtype
)
375 /* If the registers numbers are equal, then we have a match. */
376 if (q
->regnum
== regnum
)
377 return 1; /* load pending */
379 /* Check for overlap of a load with a multi-word register. */
380 if (regnum
< q
->regnum
)
382 if (regnum
+ words
> q
->regnum
)
385 /* Check for overlap of a multi-word load with the register. */
388 int data_words
= (q
->length
+ sizeof (SI
) - 1) / sizeof (SI
);
389 if (q
->regnum
+ data_words
> regnum
)
394 return 0; /* no load pending */
397 /* Check to see if a cache flush pending which affects the given address. */
399 flush_pending_for_address (SIM_CPU
*cpu
, SI address
)
401 int line_mask
= ~(CPU_DATA_CACHE (cpu
)->line_size
- 1);
403 for (i
= 0; i
< cache_queue
.ix
; ++i
)
405 CACHE_QUEUE_ELEMENT
*q
= & cache_queue
.q
[i
];
407 /* Must be the same kind of request and active. */
408 if (! q
->active
|| q
->request
!= cache_flush
)
411 /* If the addresses are equal, then we have a match. */
412 if ((q
->address
& line_mask
) == (address
& line_mask
))
413 return 1; /* flush pending */
416 return 0; /* no flush pending */
420 remove_cache_queue_element (SIM_CPU
*cpu
, int i
)
422 /* If we are removing the load of a FR register, then remember which one(s).
424 CACHE_QUEUE_ELEMENT q
= cache_queue
.q
[i
];
426 for (--cache_queue
.ix
; i
< cache_queue
.ix
; ++i
)
427 cache_queue
.q
[i
] = cache_queue
.q
[i
+ 1];
429 /* If we removed a load of a FR register, check to see if any other loads
430 of that register is still queued. If not, then apply the queued post
431 processing time of that register to its latency. Also apply
432 1 extra cycle of latency to the register since it was a floating point
434 if (q
.request
== cache_load
&& q
.regtype
!= REGTYPE_NONE
)
436 FRV_PROFILE_STATE
*ps
= CPU_PROFILE_STATE (cpu
);
437 int data_words
= (q
.length
+ sizeof (SI
) - 1) / sizeof (SI
);
439 for (j
= 0; j
< data_words
; ++j
)
441 int regnum
= q
.regnum
+ j
;
442 if (! load_pending_for_register (cpu
, regnum
, 1, q
.regtype
))
444 if (q
.regtype
== REGTYPE_FR
)
446 int *fr
= ps
->fr_busy
;
447 fr
[regnum
] += 1 + ps
->fr_ptime
[regnum
];
448 ps
->fr_ptime
[regnum
] = 0;
455 /* Copy data from the cache buffer to the target register(s). */
457 copy_load_data (SIM_CPU
*current_cpu
, FRV_CACHE
*cache
, int slot
,
458 CACHE_QUEUE_ELEMENT
*q
)
463 if (q
->regtype
== REGTYPE_FR
)
467 QI value
= CACHE_RETURN_DATA (cache
, slot
, q
->address
, QI
, 1);
468 SET_H_FR (q
->regnum
, value
);
472 UQI value
= CACHE_RETURN_DATA (cache
, slot
, q
->address
, UQI
, 1);
473 SET_H_FR (q
->regnum
, value
);
480 QI value
= CACHE_RETURN_DATA (cache
, slot
, q
->address
, QI
, 1);
481 SET_H_GR (q
->regnum
, value
);
485 UQI value
= CACHE_RETURN_DATA (cache
, slot
, q
->address
, UQI
, 1);
486 SET_H_GR (q
->regnum
, value
);
491 if (q
->regtype
== REGTYPE_FR
)
495 HI value
= CACHE_RETURN_DATA (cache
, slot
, q
->address
, HI
, 2);
496 SET_H_FR (q
->regnum
, value
);
500 UHI value
= CACHE_RETURN_DATA (cache
, slot
, q
->address
, UHI
, 2);
501 SET_H_FR (q
->regnum
, value
);
508 HI value
= CACHE_RETURN_DATA (cache
, slot
, q
->address
, HI
, 2);
509 SET_H_GR (q
->regnum
, value
);
513 UHI value
= CACHE_RETURN_DATA (cache
, slot
, q
->address
, UHI
, 2);
514 SET_H_GR (q
->regnum
, value
);
519 if (q
->regtype
== REGTYPE_FR
)
522 CACHE_RETURN_DATA (cache
, slot
, q
->address
, SF
, 4));
527 CACHE_RETURN_DATA (cache
, slot
, q
->address
, SI
, 4));
531 if (q
->regtype
== REGTYPE_FR
)
533 SET_H_FR_DOUBLE (q
->regnum
,
534 CACHE_RETURN_DATA (cache
, slot
, q
->address
, DF
, 8));
538 SET_H_GR_DOUBLE (q
->regnum
,
539 CACHE_RETURN_DATA (cache
, slot
, q
->address
, DI
, 8));
543 if (q
->regtype
== REGTYPE_FR
)
544 frvbf_h_fr_quad_set_handler (current_cpu
, q
->regnum
,
545 CACHE_RETURN_DATA_ADDRESS (cache
, slot
,
549 frvbf_h_gr_quad_set_handler (current_cpu
, q
->regnum
,
550 CACHE_RETURN_DATA_ADDRESS (cache
, slot
,
560 request_complete (SIM_CPU
*cpu
, CACHE_QUEUE_ELEMENT
*q
)
563 if (! q
->active
|| q
->cycles
> 0)
566 cache
= CPU_DATA_CACHE (cpu
);
570 /* For loads, we must wait until the data is returned from the cache. */
571 if (frv_cache_data_in_buffer (cache
, 0, q
->address
, q
->reqno
))
573 copy_load_data (cpu
, cache
, 0, q
);
576 if (frv_cache_data_in_buffer (cache
, 1, q
->address
, q
->reqno
))
578 copy_load_data (cpu
, cache
, 1, q
);
584 /* We must wait until the data is flushed. */
585 if (frv_cache_data_flushed (cache
, 0, q
->address
, q
->reqno
))
587 if (frv_cache_data_flushed (cache
, 1, q
->address
, q
->reqno
))
592 /* All other requests are complete once they've been made. */
599 /* Run the insn and data caches through the given number of cycles, taking
600 note of load requests which are fullfilled as a result. */
602 run_caches (SIM_CPU
*cpu
, int cycles
)
604 FRV_CACHE
* data_cache
= CPU_DATA_CACHE (cpu
);
605 FRV_CACHE
* insn_cache
= CPU_INSN_CACHE (cpu
);
607 /* For each cycle, run the caches, noting which requests have been fullfilled
608 and submitting new requests on their designated cycles. */
609 for (i
= 0; i
< cycles
; ++i
)
612 /* Run the caches through 1 cycle. */
613 frv_cache_run (data_cache
, 1);
614 frv_cache_run (insn_cache
, 1);
616 /* Note whether prefetched insn data has been loaded yet. */
617 for (j
= LS
; j
< FRV_CACHE_PIPELINES
; ++j
)
619 if (frv_insn_fetch_buffer
[j
].reqno
!= NO_REQNO
620 && frv_cache_data_in_buffer (insn_cache
, j
,
621 frv_insn_fetch_buffer
[j
].address
,
622 frv_insn_fetch_buffer
[j
].reqno
))
623 frv_insn_fetch_buffer
[j
].reqno
= NO_REQNO
;
626 /* Check to see which requests have been satisfied and which should
628 for (j
= 0; j
< cache_queue
.ix
; ++j
)
630 CACHE_QUEUE_ELEMENT
*q
= & cache_queue
.q
[j
];
634 /* If a load has been satisfied, complete the operation and remove it
636 if (request_complete (cpu
, q
))
638 remove_cache_queue_element (cpu
, j
);
643 /* Decrease the cycle count of each queued request.
644 Submit a request for each queued request whose cycle count has
648 submit_cache_request (q
);
654 apply_latency_adjustments (SIM_CPU
*cpu
)
656 FRV_PROFILE_STATE
*ps
= CPU_PROFILE_STATE (cpu
);
658 /* update the latencies of the registers. */
659 int *fr
= ps
->fr_busy
;
660 int *acc
= ps
->acc_busy
;
661 for (i
= 0; i
< 64; ++i
)
663 if (ps
->fr_busy_adjust
[i
] > 0)
664 *fr
-= ps
->fr_busy_adjust
[i
]; /* OK if it goes negative. */
665 if (ps
->acc_busy_adjust
[i
] > 0)
666 *acc
-= ps
->acc_busy_adjust
[i
]; /* OK if it goes negative. */
672 /* Account for the number of cycles which have just passed in the latency of
673 various system elements. Works for negative cycles too so that latency
674 can be extended in the case of insn fetch latency.
675 If negative or zero, then no adjustment is necessary. */
677 update_latencies (SIM_CPU
*cpu
, int cycles
)
679 FRV_PROFILE_STATE
*ps
= CPU_PROFILE_STATE (cpu
);
681 /* update the latencies of the registers. */
688 int *gr
= ps
->gr_busy
;
689 int *fr
= ps
->fr_busy
;
690 int *acc
= ps
->acc_busy
;
692 /* This loop handles GR, FR and ACC registers. */
693 for (i
= 0; i
< 64; ++i
)
698 reset_gr_flags (cpu
, i
);
702 /* If the busy drops to 0, then mark the register as
706 int *fr_lat
= ps
->fr_latency
+ i
;
708 ps
->fr_busy_adjust
[i
] = 0;
709 /* Only clear flags if this register has no target latency. */
711 reset_fr_flags (cpu
, i
);
715 /* If the busy drops to 0, then mark the register as
719 int *acc_lat
= ps
->acc_latency
+ i
;
721 ps
->acc_busy_adjust
[i
] = 0;
722 /* Only clear flags if this register has no target latency. */
724 reset_acc_flags (cpu
, i
);
732 /* This loop handles CCR registers. */
734 for (i
= 0; i
< 8; ++i
)
739 reset_cc_flags (cpu
, i
);
745 /* This loop handles SPR registers. */
747 for (i
= 0; i
< 4096; ++i
)
755 /* This loop handles resources. */
756 idiv
= ps
->idiv_busy
;
757 fdiv
= ps
->fdiv_busy
;
758 fsqrt
= ps
->fsqrt_busy
;
759 for (i
= 0; i
< 2; ++i
)
761 *idiv
= (*idiv
<= cycles
) ? 0 : (*idiv
- cycles
);
762 *fdiv
= (*fdiv
<= cycles
) ? 0 : (*fdiv
- cycles
);
763 *fsqrt
= (*fsqrt
<= cycles
) ? 0 : (*fsqrt
- cycles
);
768 /* Float and media units can occur in 4 slots on some machines. */
769 flt
= ps
->float_busy
;
770 media
= ps
->media_busy
;
771 for (i
= 0; i
< 4; ++i
)
773 *flt
= (*flt
<= cycles
) ? 0 : (*flt
- cycles
);
774 *media
= (*media
<= cycles
) ? 0 : (*media
- cycles
);
780 /* Print information about the wait for the given number of cycles. */
782 frv_model_trace_wait_cycles (SIM_CPU
*cpu
, int cycles
, const char *hazard_name
)
784 if (TRACE_INSN_P (cpu
) && cycles
> 0)
786 SIM_DESC sd
= CPU_STATE (cpu
);
787 trace_printf (sd
, cpu
, "**** %s wait %d cycles ***\n",
788 hazard_name
, cycles
);
793 trace_vliw_wait_cycles (SIM_CPU
*cpu
)
795 if (TRACE_INSN_P (cpu
))
797 FRV_PROFILE_STATE
*ps
= CPU_PROFILE_STATE (cpu
);
798 frv_model_trace_wait_cycles (cpu
, ps
->vliw_wait
, hazard_name
);
802 /* Wait for the given number of cycles. */
804 frv_model_advance_cycles (SIM_CPU
*cpu
, int cycles
)
806 PROFILE_DATA
*p
= CPU_PROFILE_DATA (cpu
);
807 update_latencies (cpu
, cycles
);
808 run_caches (cpu
, cycles
);
809 PROFILE_MODEL_TOTAL_CYCLES (p
) += cycles
;
813 handle_resource_wait (SIM_CPU
*cpu
)
815 FRV_PROFILE_STATE
*ps
= CPU_PROFILE_STATE (cpu
);
816 if (ps
->vliw_wait
!= 0)
817 frv_model_advance_cycles (cpu
, ps
->vliw_wait
);
818 if (ps
->vliw_load_stall
> ps
->vliw_wait
)
819 ps
->vliw_load_stall
-= ps
->vliw_wait
;
821 ps
->vliw_load_stall
= 0;
824 /* Account for the number of cycles until these resources will be available
827 update_target_latencies (SIM_CPU
*cpu
)
829 FRV_PROFILE_STATE
*ps
= CPU_PROFILE_STATE (cpu
);
831 /* update the latencies of the registers. */
833 int *gr_lat
= ps
->gr_latency
;
834 int *fr_lat
= ps
->fr_latency
;
835 int *acc_lat
= ps
->acc_latency
;
838 int *gr
= ps
->gr_busy
;
839 int *fr
= ps
->fr_busy
;
840 int *acc
= ps
->acc_busy
;
842 /* This loop handles GR, FR and ACC registers. */
843 for (i
= 0; i
< 64; ++i
)
864 /* This loop handles CCR registers. */
866 ccr_lat
= ps
->ccr_latency
;
867 for (i
= 0; i
< 8; ++i
)
876 /* This loop handles SPR registers. */
878 spr_lat
= ps
->spr_latency
;
879 for (i
= 0; i
< 4096; ++i
)
890 /* Run the caches until all pending cache flushes are complete. */
892 wait_for_flush (SIM_CPU
*cpu
)
894 SI address
= CPU_LOAD_ADDRESS (cpu
);
896 while (flush_pending_for_address (cpu
, address
))
898 frv_model_advance_cycles (cpu
, 1);
901 if (TRACE_INSN_P (cpu
) && wait
)
903 sprintf (hazard_name
, "Data cache flush address %p:", address
);
904 frv_model_trace_wait_cycles (cpu
, wait
, hazard_name
);
908 /* Initialize cycle counting for an insn.
909 FIRST_P is non-zero if this is the first insn in a set of parallel
912 frvbf_model_insn_before (SIM_CPU
*cpu
, int first_p
)
914 SIM_DESC sd
= CPU_STATE (cpu
);
915 FRV_PROFILE_STATE
*ps
= CPU_PROFILE_STATE (cpu
);
919 memset (ps
->fr_busy_adjust
, 0, sizeof (ps
->fr_busy_adjust
));
920 memset (ps
->acc_busy_adjust
, 0, sizeof (ps
->acc_busy_adjust
));
926 ps
->vliw_branch_taken
= 0;
927 ps
->vliw_load_stall
= 0;
930 switch (STATE_ARCHITECTURE (sd
)->mach
)
934 fr400_model_insn_before (cpu
, first_p
);
937 fr500_model_insn_before (cpu
, first_p
);
940 fr550_model_insn_before (cpu
, first_p
);
947 wait_for_flush (cpu
);
950 /* Record the cycles computed for an insn.
951 LAST_P is non-zero if this is the last insn in a set of parallel insns,
952 and we update the total cycle count.
953 CYCLES is the cycle count of the insn. */
956 frvbf_model_insn_after (SIM_CPU
*cpu
, int last_p
, int cycles
)
958 PROFILE_DATA
*p
= CPU_PROFILE_DATA (cpu
);
959 FRV_PROFILE_STATE
*ps
= CPU_PROFILE_STATE (cpu
);
960 SIM_DESC sd
= CPU_STATE (cpu
);
962 PROFILE_MODEL_CUR_INSN_CYCLES (p
) = cycles
;
964 /* The number of cycles for a VLIW insn is the maximum number of cycles
965 used by any individual insn within it. */
966 if (cycles
> ps
->vliw_cycles
)
967 ps
->vliw_cycles
= cycles
;
971 /* This is the last insn in a VLIW insn. */
972 struct frv_interrupt_timer
*timer
= & frv_interrupt_state
.timer
;
974 activate_cache_requests (cpu
); /* before advancing cycles. */
975 apply_latency_adjustments (cpu
); /* must go first. */
976 update_target_latencies (cpu
); /* must go next. */
977 frv_model_advance_cycles (cpu
, ps
->vliw_cycles
);
979 PROFILE_MODEL_LOAD_STALL_CYCLES (p
) += ps
->vliw_load_stall
;
981 /* Check the interrupt timer. cycles contains the total cycle count. */
984 cycles
= PROFILE_MODEL_TOTAL_CYCLES (p
);
985 if (timer
->current
% timer
->value
986 + (cycles
- timer
->current
) >= timer
->value
)
987 frv_queue_external_interrupt (cpu
, timer
->interrupt
);
988 timer
->current
= cycles
;
991 ps
->past_first_p
= 0; /* Next one will be the first in a new VLIW. */
992 ps
->branch_address
= -1;
995 ps
->past_first_p
= 1;
997 switch (STATE_ARCHITECTURE (sd
)->mach
)
1000 case bfd_mach_fr450
:
1001 fr400_model_insn_after (cpu
, last_p
, cycles
);
1003 case bfd_mach_fr500
:
1004 fr500_model_insn_after (cpu
, last_p
, cycles
);
1006 case bfd_mach_fr550
:
1007 fr550_model_insn_after (cpu
, last_p
, cycles
);
1015 frvbf_model_branch (SIM_CPU
*current_cpu
, PCADDR target
, int hint
)
1017 /* Record the hint and branch address for use in profiling. */
1018 FRV_PROFILE_STATE
*ps
= CPU_PROFILE_STATE (current_cpu
);
1019 ps
->branch_hint
= hint
;
1020 ps
->branch_address
= target
;
1023 /* Top up the latency of the given GR by the given number of cycles. */
1025 update_GR_latency (SIM_CPU
*cpu
, INT out_GR
, int cycles
)
1029 FRV_PROFILE_STATE
*ps
= CPU_PROFILE_STATE (cpu
);
1030 int *gr
= ps
->gr_latency
;
1031 if (gr
[out_GR
] < cycles
)
1032 gr
[out_GR
] = cycles
;
1037 decrease_GR_busy (SIM_CPU
*cpu
, INT in_GR
, int cycles
)
1041 FRV_PROFILE_STATE
*ps
= CPU_PROFILE_STATE (cpu
);
1042 int *gr
= ps
->gr_busy
;
1043 gr
[in_GR
] -= cycles
;
1047 /* Top up the latency of the given double GR by the number of cycles. */
1049 update_GRdouble_latency (SIM_CPU
*cpu
, INT out_GR
, int cycles
)
1053 FRV_PROFILE_STATE
*ps
= CPU_PROFILE_STATE (cpu
);
1054 int *gr
= ps
->gr_latency
;
1055 if (gr
[out_GR
] < cycles
)
1056 gr
[out_GR
] = cycles
;
1057 if (out_GR
< 63 && gr
[out_GR
+ 1] < cycles
)
1058 gr
[out_GR
+ 1] = cycles
;
1063 update_GR_latency_for_load (SIM_CPU
*cpu
, INT out_GR
, int cycles
)
1067 FRV_PROFILE_STATE
*ps
= CPU_PROFILE_STATE (cpu
);
1068 int *gr
= ps
->gr_latency
;
1070 /* The latency of the GR will be at least the number of cycles used
1072 if (gr
[out_GR
] < cycles
)
1073 gr
[out_GR
] = cycles
;
1075 /* The latency will also depend on how long it takes to retrieve the
1076 data from the cache or memory. Assume that the load is issued
1077 after the last cycle of the insn. */
1078 request_cache_load (cpu
, out_GR
, REGTYPE_NONE
, cycles
);
1083 update_GRdouble_latency_for_load (SIM_CPU
*cpu
, INT out_GR
, int cycles
)
1087 FRV_PROFILE_STATE
*ps
= CPU_PROFILE_STATE (cpu
);
1088 int *gr
= ps
->gr_latency
;
1090 /* The latency of the GR will be at least the number of cycles used
1092 if (gr
[out_GR
] < cycles
)
1093 gr
[out_GR
] = cycles
;
1094 if (out_GR
< 63 && gr
[out_GR
+ 1] < cycles
)
1095 gr
[out_GR
+ 1] = cycles
;
1097 /* The latency will also depend on how long it takes to retrieve the
1098 data from the cache or memory. Assume that the load is issued
1099 after the last cycle of the insn. */
1100 request_cache_load (cpu
, out_GR
, REGTYPE_NONE
, cycles
);
1105 update_GR_latency_for_swap (SIM_CPU
*cpu
, INT out_GR
, int cycles
)
1107 update_GR_latency_for_load (cpu
, out_GR
, cycles
);
1110 /* Top up the latency of the given FR by the given number of cycles. */
1112 update_FR_latency (SIM_CPU
*cpu
, INT out_FR
, int cycles
)
1116 FRV_PROFILE_STATE
*ps
= CPU_PROFILE_STATE (cpu
);
1117 int *fr
= ps
->fr_latency
;
1118 if (fr
[out_FR
] < cycles
)
1119 fr
[out_FR
] = cycles
;
1123 /* Top up the latency of the given double FR by the number of cycles. */
1125 update_FRdouble_latency (SIM_CPU
*cpu
, INT out_FR
, int cycles
)
1129 FRV_PROFILE_STATE
*ps
= CPU_PROFILE_STATE (cpu
);
1130 int *fr
= ps
->fr_latency
;
1131 if (fr
[out_FR
] < cycles
)
1132 fr
[out_FR
] = cycles
;
1133 if (out_FR
< 63 && fr
[out_FR
+ 1] < cycles
)
1134 fr
[out_FR
+ 1] = cycles
;
1139 update_FR_latency_for_load (SIM_CPU
*cpu
, INT out_FR
, int cycles
)
1143 FRV_PROFILE_STATE
*ps
= CPU_PROFILE_STATE (cpu
);
1144 int *fr
= ps
->fr_latency
;
1146 /* The latency of the FR will be at least the number of cycles used
1148 if (fr
[out_FR
] < cycles
)
1149 fr
[out_FR
] = cycles
;
1151 /* The latency will also depend on how long it takes to retrieve the
1152 data from the cache or memory. Assume that the load is issued
1153 after the last cycle of the insn. */
1154 request_cache_load (cpu
, out_FR
, REGTYPE_FR
, cycles
);
1159 update_FRdouble_latency_for_load (SIM_CPU
*cpu
, INT out_FR
, int cycles
)
1163 FRV_PROFILE_STATE
*ps
= CPU_PROFILE_STATE (cpu
);
1164 int *fr
= ps
->fr_latency
;
1166 /* The latency of the FR will be at least the number of cycles used
1168 if (fr
[out_FR
] < cycles
)
1169 fr
[out_FR
] = cycles
;
1170 if (out_FR
< 63 && fr
[out_FR
+ 1] < cycles
)
1171 fr
[out_FR
+ 1] = cycles
;
1173 /* The latency will also depend on how long it takes to retrieve the
1174 data from the cache or memory. Assume that the load is issued
1175 after the last cycle of the insn. */
1176 request_cache_load (cpu
, out_FR
, REGTYPE_FR
, cycles
);
1180 /* Top up the post-processing time of the given FR by the given number of
1183 update_FR_ptime (SIM_CPU
*cpu
, INT out_FR
, int cycles
)
1187 FRV_PROFILE_STATE
*ps
= CPU_PROFILE_STATE (cpu
);
1188 /* If a load is pending on this register, then add the cycles to
1189 the post processing time for this register. Otherwise apply it
1190 directly to the latency of the register. */
1191 if (! load_pending_for_register (cpu
, out_FR
, 1, REGTYPE_FR
))
1193 int *fr
= ps
->fr_latency
;
1194 fr
[out_FR
] += cycles
;
1197 ps
->fr_ptime
[out_FR
] += cycles
;
1202 update_FRdouble_ptime (SIM_CPU
*cpu
, INT out_FR
, int cycles
)
1206 FRV_PROFILE_STATE
*ps
= CPU_PROFILE_STATE (cpu
);
1207 /* If a load is pending on this register, then add the cycles to
1208 the post processing time for this register. Otherwise apply it
1209 directly to the latency of the register. */
1210 if (! load_pending_for_register (cpu
, out_FR
, 2, REGTYPE_FR
))
1212 int *fr
= ps
->fr_latency
;
1213 fr
[out_FR
] += cycles
;
1215 fr
[out_FR
+ 1] += cycles
;
1219 ps
->fr_ptime
[out_FR
] += cycles
;
1221 ps
->fr_ptime
[out_FR
+ 1] += cycles
;
1226 /* Top up the post-processing time of the given ACC by the given number of
1229 update_ACC_ptime (SIM_CPU
*cpu
, INT out_ACC
, int cycles
)
1233 FRV_PROFILE_STATE
*ps
= CPU_PROFILE_STATE (cpu
);
1234 /* No load can be pending on this register. Apply the cycles
1235 directly to the latency of the register. */
1236 int *acc
= ps
->acc_latency
;
1237 acc
[out_ACC
] += cycles
;
1241 /* Top up the post-processing time of the given SPR by the given number of
1244 update_SPR_ptime (SIM_CPU
*cpu
, INT out_SPR
, int cycles
)
1248 FRV_PROFILE_STATE
*ps
= CPU_PROFILE_STATE (cpu
);
1249 /* No load can be pending on this register. Apply the cycles
1250 directly to the latency of the register. */
1251 int *spr
= ps
->spr_latency
;
1252 spr
[out_SPR
] += cycles
;
1257 decrease_ACC_busy (SIM_CPU
*cpu
, INT out_ACC
, int cycles
)
1261 FRV_PROFILE_STATE
*ps
= CPU_PROFILE_STATE (cpu
);
1262 int *acc
= ps
->acc_busy
;
1263 acc
[out_ACC
] -= cycles
;
1264 if (ps
->acc_busy_adjust
[out_ACC
] >= 0
1265 && cycles
> ps
->acc_busy_adjust
[out_ACC
])
1266 ps
->acc_busy_adjust
[out_ACC
] = cycles
;
1271 increase_ACC_busy (SIM_CPU
*cpu
, INT out_ACC
, int cycles
)
1275 FRV_PROFILE_STATE
*ps
= CPU_PROFILE_STATE (cpu
);
1276 int *acc
= ps
->acc_busy
;
1277 acc
[out_ACC
] += cycles
;
1282 enforce_full_acc_latency (SIM_CPU
*cpu
, INT in_ACC
)
1284 FRV_PROFILE_STATE
*ps
= CPU_PROFILE_STATE (cpu
);
1285 ps
->acc_busy_adjust
[in_ACC
] = -1;
1289 decrease_FR_busy (SIM_CPU
*cpu
, INT out_FR
, int cycles
)
1293 FRV_PROFILE_STATE
*ps
= CPU_PROFILE_STATE (cpu
);
1294 int *fr
= ps
->fr_busy
;
1295 fr
[out_FR
] -= cycles
;
1296 if (ps
->fr_busy_adjust
[out_FR
] >= 0
1297 && cycles
> ps
->fr_busy_adjust
[out_FR
])
1298 ps
->fr_busy_adjust
[out_FR
] = cycles
;
1303 increase_FR_busy (SIM_CPU
*cpu
, INT out_FR
, int cycles
)
1307 FRV_PROFILE_STATE
*ps
= CPU_PROFILE_STATE (cpu
);
1308 int *fr
= ps
->fr_busy
;
1309 fr
[out_FR
] += cycles
;
1313 /* Top up the latency of the given ACC by the given number of cycles. */
1315 update_ACC_latency (SIM_CPU
*cpu
, INT out_ACC
, int cycles
)
1319 FRV_PROFILE_STATE
*ps
= CPU_PROFILE_STATE (cpu
);
1320 int *acc
= ps
->acc_latency
;
1321 if (acc
[out_ACC
] < cycles
)
1322 acc
[out_ACC
] = cycles
;
1326 /* Top up the latency of the given CCR by the given number of cycles. */
1328 update_CCR_latency (SIM_CPU
*cpu
, INT out_CCR
, int cycles
)
1332 FRV_PROFILE_STATE
*ps
= CPU_PROFILE_STATE (cpu
);
1333 int *ccr
= ps
->ccr_latency
;
1334 if (ccr
[out_CCR
] < cycles
)
1335 ccr
[out_CCR
] = cycles
;
1339 /* Top up the latency of the given SPR by the given number of cycles. */
1341 update_SPR_latency (SIM_CPU
*cpu
, INT out_SPR
, int cycles
)
1345 FRV_PROFILE_STATE
*ps
= CPU_PROFILE_STATE (cpu
);
1346 int *spr
= ps
->spr_latency
;
1347 if (spr
[out_SPR
] < cycles
)
1348 spr
[out_SPR
] = cycles
;
1352 /* Top up the latency of the given integer division resource by the given
1353 number of cycles. */
1355 update_idiv_resource_latency (SIM_CPU
*cpu
, INT in_resource
, int cycles
)
1357 /* operate directly on the busy cycles since each resource can only
1358 be used once in a VLIW insn. */
1359 FRV_PROFILE_STATE
*ps
= CPU_PROFILE_STATE (cpu
);
1360 int *r
= ps
->idiv_busy
;
1361 r
[in_resource
] = cycles
;
1364 /* Set the latency of the given resource to the given number of cycles. */
1366 update_fdiv_resource_latency (SIM_CPU
*cpu
, INT in_resource
, int cycles
)
1368 /* operate directly on the busy cycles since each resource can only
1369 be used once in a VLIW insn. */
1370 FRV_PROFILE_STATE
*ps
= CPU_PROFILE_STATE (cpu
);
1371 int *r
= ps
->fdiv_busy
;
1372 r
[in_resource
] = cycles
;
1375 /* Set the latency of the given resource to the given number of cycles. */
1377 update_fsqrt_resource_latency (SIM_CPU
*cpu
, INT in_resource
, int cycles
)
1379 /* operate directly on the busy cycles since each resource can only
1380 be used once in a VLIW insn. */
1381 FRV_PROFILE_STATE
*ps
= CPU_PROFILE_STATE (cpu
);
1382 int *r
= ps
->fsqrt_busy
;
1383 r
[in_resource
] = cycles
;
1386 /* Set the latency of the given resource to the given number of cycles. */
1388 update_float_resource_latency (SIM_CPU
*cpu
, INT in_resource
, int cycles
)
1390 /* operate directly on the busy cycles since each resource can only
1391 be used once in a VLIW insn. */
1392 FRV_PROFILE_STATE
*ps
= CPU_PROFILE_STATE (cpu
);
1393 int *r
= ps
->float_busy
;
1394 r
[in_resource
] = cycles
;
1398 update_media_resource_latency (SIM_CPU
*cpu
, INT in_resource
, int cycles
)
1400 /* operate directly on the busy cycles since each resource can only
1401 be used once in a VLIW insn. */
1402 FRV_PROFILE_STATE
*ps
= CPU_PROFILE_STATE (cpu
);
1403 int *r
= ps
->media_busy
;
1404 r
[in_resource
] = cycles
;
1407 /* Set the branch penalty to the given number of cycles. */
1409 update_branch_penalty (SIM_CPU
*cpu
, int cycles
)
1411 /* operate directly on the busy cycles since only one branch can occur
1413 FRV_PROFILE_STATE
*ps
= CPU_PROFILE_STATE (cpu
);
1414 ps
->branch_penalty
= cycles
;
1417 /* Check the availability of the given GR register and update the number
1418 of cycles the current VLIW insn must wait until it is available. */
1420 vliw_wait_for_GR (SIM_CPU
*cpu
, INT in_GR
)
1422 FRV_PROFILE_STATE
*ps
= CPU_PROFILE_STATE (cpu
);
1423 int *gr
= ps
->gr_busy
;
1424 /* If the latency of the register is greater than the current wait
1425 then update the current wait. */
1426 if (in_GR
>= 0 && gr
[in_GR
] > ps
->vliw_wait
)
1428 if (TRACE_INSN_P (cpu
))
1429 sprintf (hazard_name
, "Data hazard for gr%d:", in_GR
);
1430 ps
->vliw_wait
= gr
[in_GR
];
1434 /* Check the availability of the given GR register and update the number
1435 of cycles the current VLIW insn must wait until it is available. */
1437 vliw_wait_for_GRdouble (SIM_CPU
*cpu
, INT in_GR
)
1439 FRV_PROFILE_STATE
*ps
= CPU_PROFILE_STATE (cpu
);
1440 int *gr
= ps
->gr_busy
;
1441 /* If the latency of the register is greater than the current wait
1442 then update the current wait. */
1445 if (gr
[in_GR
] > ps
->vliw_wait
)
1447 if (TRACE_INSN_P (cpu
))
1448 sprintf (hazard_name
, "Data hazard for gr%d:", in_GR
);
1449 ps
->vliw_wait
= gr
[in_GR
];
1451 if (in_GR
< 63 && gr
[in_GR
+ 1] > ps
->vliw_wait
)
1453 if (TRACE_INSN_P (cpu
))
1454 sprintf (hazard_name
, "Data hazard for gr%d:", in_GR
+ 1);
1455 ps
->vliw_wait
= gr
[in_GR
+ 1];
1460 /* Check the availability of the given FR register and update the number
1461 of cycles the current VLIW insn must wait until it is available. */
1463 vliw_wait_for_FR (SIM_CPU
*cpu
, INT in_FR
)
1465 FRV_PROFILE_STATE
*ps
= CPU_PROFILE_STATE (cpu
);
1466 int *fr
= ps
->fr_busy
;
1467 /* If the latency of the register is greater than the current wait
1468 then update the current wait. */
1469 if (in_FR
>= 0 && fr
[in_FR
] > ps
->vliw_wait
)
1471 if (TRACE_INSN_P (cpu
))
1472 sprintf (hazard_name
, "Data hazard for fr%d:", in_FR
);
1473 ps
->vliw_wait
= fr
[in_FR
];
1477 /* Check the availability of the given GR register and update the number
1478 of cycles the current VLIW insn must wait until it is available. */
1480 vliw_wait_for_FRdouble (SIM_CPU
*cpu
, INT in_FR
)
1482 FRV_PROFILE_STATE
*ps
= CPU_PROFILE_STATE (cpu
);
1483 int *fr
= ps
->fr_busy
;
1484 /* If the latency of the register is greater than the current wait
1485 then update the current wait. */
1488 if (fr
[in_FR
] > ps
->vliw_wait
)
1490 if (TRACE_INSN_P (cpu
))
1491 sprintf (hazard_name
, "Data hazard for fr%d:", in_FR
);
1492 ps
->vliw_wait
= fr
[in_FR
];
1494 if (in_FR
< 63 && fr
[in_FR
+ 1] > ps
->vliw_wait
)
1496 if (TRACE_INSN_P (cpu
))
1497 sprintf (hazard_name
, "Data hazard for fr%d:", in_FR
+ 1);
1498 ps
->vliw_wait
= fr
[in_FR
+ 1];
1503 /* Check the availability of the given CCR register and update the number
1504 of cycles the current VLIW insn must wait until it is available. */
1506 vliw_wait_for_CCR (SIM_CPU
*cpu
, INT in_CCR
)
1508 FRV_PROFILE_STATE
*ps
= CPU_PROFILE_STATE (cpu
);
1509 int *ccr
= ps
->ccr_busy
;
1510 /* If the latency of the register is greater than the current wait
1511 then update the current wait. */
1512 if (in_CCR
>= 0 && ccr
[in_CCR
] > ps
->vliw_wait
)
1514 if (TRACE_INSN_P (cpu
))
1517 sprintf (hazard_name
, "Data hazard for icc%d:", in_CCR
-4);
1519 sprintf (hazard_name
, "Data hazard for fcc%d:", in_CCR
);
1521 ps
->vliw_wait
= ccr
[in_CCR
];
1525 /* Check the availability of the given ACC register and update the number
1526 of cycles the current VLIW insn must wait until it is available. */
1528 vliw_wait_for_ACC (SIM_CPU
*cpu
, INT in_ACC
)
1530 FRV_PROFILE_STATE
*ps
= CPU_PROFILE_STATE (cpu
);
1531 int *acc
= ps
->acc_busy
;
1532 /* If the latency of the register is greater than the current wait
1533 then update the current wait. */
1534 if (in_ACC
>= 0 && acc
[in_ACC
] > ps
->vliw_wait
)
1536 if (TRACE_INSN_P (cpu
))
1537 sprintf (hazard_name
, "Data hazard for acc%d:", in_ACC
);
1538 ps
->vliw_wait
= acc
[in_ACC
];
1542 /* Check the availability of the given SPR register and update the number
1543 of cycles the current VLIW insn must wait until it is available. */
1545 vliw_wait_for_SPR (SIM_CPU
*cpu
, INT in_SPR
)
1547 FRV_PROFILE_STATE
*ps
= CPU_PROFILE_STATE (cpu
);
1548 int *spr
= ps
->spr_busy
;
1549 /* If the latency of the register is greater than the current wait
1550 then update the current wait. */
1551 if (in_SPR
>= 0 && spr
[in_SPR
] > ps
->vliw_wait
)
1553 if (TRACE_INSN_P (cpu
))
1554 sprintf (hazard_name
, "Data hazard for spr %d:", in_SPR
);
1555 ps
->vliw_wait
= spr
[in_SPR
];
1559 /* Check the availability of the given integer division resource and update
1560 the number of cycles the current VLIW insn must wait until it is available.
1563 vliw_wait_for_idiv_resource (SIM_CPU
*cpu
, INT in_resource
)
1565 FRV_PROFILE_STATE
*ps
= CPU_PROFILE_STATE (cpu
);
1566 int *r
= ps
->idiv_busy
;
1567 /* If the latency of the resource is greater than the current wait
1568 then update the current wait. */
1569 if (r
[in_resource
] > ps
->vliw_wait
)
1571 if (TRACE_INSN_P (cpu
))
1573 sprintf (hazard_name
, "Resource hazard for integer division in slot I%d:", in_resource
);
1575 ps
->vliw_wait
= r
[in_resource
];
1579 /* Check the availability of the given float division resource and update
1580 the number of cycles the current VLIW insn must wait until it is available.
1583 vliw_wait_for_fdiv_resource (SIM_CPU
*cpu
, INT in_resource
)
1585 FRV_PROFILE_STATE
*ps
= CPU_PROFILE_STATE (cpu
);
1586 int *r
= ps
->fdiv_busy
;
1587 /* If the latency of the resource is greater than the current wait
1588 then update the current wait. */
1589 if (r
[in_resource
] > ps
->vliw_wait
)
1591 if (TRACE_INSN_P (cpu
))
1593 sprintf (hazard_name
, "Resource hazard for floating point division in slot F%d:", in_resource
);
1595 ps
->vliw_wait
= r
[in_resource
];
1599 /* Check the availability of the given float square root resource and update
1600 the number of cycles the current VLIW insn must wait until it is available.
1603 vliw_wait_for_fsqrt_resource (SIM_CPU
*cpu
, INT in_resource
)
1605 FRV_PROFILE_STATE
*ps
= CPU_PROFILE_STATE (cpu
);
1606 int *r
= ps
->fsqrt_busy
;
1607 /* If the latency of the resource is greater than the current wait
1608 then update the current wait. */
1609 if (r
[in_resource
] > ps
->vliw_wait
)
1611 if (TRACE_INSN_P (cpu
))
1613 sprintf (hazard_name
, "Resource hazard for square root in slot F%d:", in_resource
);
1615 ps
->vliw_wait
= r
[in_resource
];
1619 /* Check the availability of the given float unit resource and update
1620 the number of cycles the current VLIW insn must wait until it is available.
1623 vliw_wait_for_float_resource (SIM_CPU
*cpu
, INT in_resource
)
1625 FRV_PROFILE_STATE
*ps
= CPU_PROFILE_STATE (cpu
);
1626 int *r
= ps
->float_busy
;
1627 /* If the latency of the resource is greater than the current wait
1628 then update the current wait. */
1629 if (r
[in_resource
] > ps
->vliw_wait
)
1631 if (TRACE_INSN_P (cpu
))
1633 sprintf (hazard_name
, "Resource hazard for floating point unit in slot F%d:", in_resource
);
1635 ps
->vliw_wait
= r
[in_resource
];
1639 /* Check the availability of the given media unit resource and update
1640 the number of cycles the current VLIW insn must wait until it is available.
1643 vliw_wait_for_media_resource (SIM_CPU
*cpu
, INT in_resource
)
1645 FRV_PROFILE_STATE
*ps
= CPU_PROFILE_STATE (cpu
);
1646 int *r
= ps
->media_busy
;
1647 /* If the latency of the resource is greater than the current wait
1648 then update the current wait. */
1649 if (r
[in_resource
] > ps
->vliw_wait
)
1651 if (TRACE_INSN_P (cpu
))
1653 sprintf (hazard_name
, "Resource hazard for media unit in slot M%d:", in_resource
);
1655 ps
->vliw_wait
= r
[in_resource
];
1659 /* Run the caches until all requests for the given register(s) are satisfied. */
1661 load_wait_for_GR (SIM_CPU
*cpu
, INT in_GR
)
1666 while (load_pending_for_register (cpu
, in_GR
, 1/*words*/, REGTYPE_NONE
))
1668 frv_model_advance_cycles (cpu
, 1);
1673 FRV_PROFILE_STATE
*ps
= CPU_PROFILE_STATE (cpu
);
1674 ps
->vliw_wait
+= wait
;
1675 ps
->vliw_load_stall
+= wait
;
1676 if (TRACE_INSN_P (cpu
))
1677 sprintf (hazard_name
, "Data hazard for gr%d:", in_GR
);
1683 load_wait_for_FR (SIM_CPU
*cpu
, INT in_FR
)
1687 FRV_PROFILE_STATE
*ps
= CPU_PROFILE_STATE (cpu
);
1690 while (load_pending_for_register (cpu
, in_FR
, 1/*words*/, REGTYPE_FR
))
1692 frv_model_advance_cycles (cpu
, 1);
1695 /* Post processing time may have been added to the register's
1696 latency after the loads were processed. Account for that too.
1702 frv_model_advance_cycles (cpu
, fr
[in_FR
]);
1704 /* Update the vliw_wait with the number of cycles we waited for the
1705 load and any post-processing. */
1708 ps
->vliw_wait
+= wait
;
1709 ps
->vliw_load_stall
+= wait
;
1710 if (TRACE_INSN_P (cpu
))
1711 sprintf (hazard_name
, "Data hazard for fr%d:", in_FR
);
1717 load_wait_for_GRdouble (SIM_CPU
*cpu
, INT in_GR
)
1722 while (load_pending_for_register (cpu
, in_GR
, 2/*words*/, REGTYPE_NONE
))
1724 frv_model_advance_cycles (cpu
, 1);
1729 FRV_PROFILE_STATE
*ps
= CPU_PROFILE_STATE (cpu
);
1730 ps
->vliw_wait
+= wait
;
1731 ps
->vliw_load_stall
+= wait
;
1732 if (TRACE_INSN_P (cpu
))
1733 sprintf (hazard_name
, "Data hazard for gr%d:", in_GR
);
1739 load_wait_for_FRdouble (SIM_CPU
*cpu
, INT in_FR
)
1743 FRV_PROFILE_STATE
*ps
= CPU_PROFILE_STATE (cpu
);
1746 while (load_pending_for_register (cpu
, in_FR
, 2/*words*/, REGTYPE_FR
))
1748 frv_model_advance_cycles (cpu
, 1);
1751 /* Post processing time may have been added to the registers'
1752 latencies after the loads were processed. Account for that too.
1758 frv_model_advance_cycles (cpu
, fr
[in_FR
]);
1764 wait
+= fr
[in_FR
+ 1];
1765 frv_model_advance_cycles (cpu
, fr
[in_FR
+ 1]);
1768 /* Update the vliw_wait with the number of cycles we waited for the
1769 load and any post-processing. */
1772 ps
->vliw_wait
+= wait
;
1773 ps
->vliw_load_stall
+= wait
;
1774 if (TRACE_INSN_P (cpu
))
1775 sprintf (hazard_name
, "Data hazard for fr%d:", in_FR
);
1781 enforce_full_fr_latency (SIM_CPU
*cpu
, INT in_FR
)
1783 FRV_PROFILE_STATE
*ps
= CPU_PROFILE_STATE (cpu
);
1784 ps
->fr_busy_adjust
[in_FR
] = -1;
1787 /* Calculate how long the post processing for a floating point insn must
1788 wait for resources to become available. */
1790 post_wait_for_FR (SIM_CPU
*cpu
, INT in_FR
)
1792 FRV_PROFILE_STATE
*ps
= CPU_PROFILE_STATE (cpu
);
1793 int *fr
= ps
->fr_busy
;
1795 if (in_FR
>= 0 && fr
[in_FR
] > ps
->post_wait
)
1797 ps
->post_wait
= fr
[in_FR
];
1798 if (TRACE_INSN_P (cpu
))
1799 sprintf (hazard_name
, "Data hazard for fr%d:", in_FR
);
1803 /* Calculate how long the post processing for a floating point insn must
1804 wait for resources to become available. */
1806 post_wait_for_FRdouble (SIM_CPU
*cpu
, INT in_FR
)
1808 FRV_PROFILE_STATE
*ps
= CPU_PROFILE_STATE (cpu
);
1809 int *fr
= ps
->fr_busy
;
1813 if (fr
[in_FR
] > ps
->post_wait
)
1815 ps
->post_wait
= fr
[in_FR
];
1816 if (TRACE_INSN_P (cpu
))
1817 sprintf (hazard_name
, "Data hazard for fr%d:", in_FR
);
1819 if (in_FR
< 63 && fr
[in_FR
+ 1] > ps
->post_wait
)
1821 ps
->post_wait
= fr
[in_FR
+ 1];
1822 if (TRACE_INSN_P (cpu
))
1823 sprintf (hazard_name
, "Data hazard for fr%d:", in_FR
+ 1);
1829 post_wait_for_ACC (SIM_CPU
*cpu
, INT in_ACC
)
1831 FRV_PROFILE_STATE
*ps
= CPU_PROFILE_STATE (cpu
);
1832 int *acc
= ps
->acc_busy
;
1834 if (in_ACC
>= 0 && acc
[in_ACC
] > ps
->post_wait
)
1836 ps
->post_wait
= acc
[in_ACC
];
1837 if (TRACE_INSN_P (cpu
))
1838 sprintf (hazard_name
, "Data hazard for acc%d:", in_ACC
);
1843 post_wait_for_CCR (SIM_CPU
*cpu
, INT in_CCR
)
1845 FRV_PROFILE_STATE
*ps
= CPU_PROFILE_STATE (cpu
);
1846 int *ccr
= ps
->ccr_busy
;
1848 if (in_CCR
>= 0 && ccr
[in_CCR
] > ps
->post_wait
)
1850 ps
->post_wait
= ccr
[in_CCR
];
1851 if (TRACE_INSN_P (cpu
))
1854 sprintf (hazard_name
, "Data hazard for icc%d:", in_CCR
- 4);
1856 sprintf (hazard_name
, "Data hazard for fcc%d:", in_CCR
);
1862 post_wait_for_SPR (SIM_CPU
*cpu
, INT in_SPR
)
1864 FRV_PROFILE_STATE
*ps
= CPU_PROFILE_STATE (cpu
);
1865 int *spr
= ps
->spr_busy
;
1867 if (in_SPR
>= 0 && spr
[in_SPR
] > ps
->post_wait
)
1869 ps
->post_wait
= spr
[in_SPR
];
1870 if (TRACE_INSN_P (cpu
))
1871 sprintf (hazard_name
, "Data hazard for spr[%d]:", in_SPR
);
1876 post_wait_for_fdiv (SIM_CPU
*cpu
, INT slot
)
1878 FRV_PROFILE_STATE
*ps
= CPU_PROFILE_STATE (cpu
);
1879 int *fdiv
= ps
->fdiv_busy
;
1881 /* Multiple floating point divisions in the same slot need only wait 1
1883 if (fdiv
[slot
] > 0 && 1 > ps
->post_wait
)
1886 if (TRACE_INSN_P (cpu
))
1888 sprintf (hazard_name
, "Resource hazard for floating point division in slot F%d:", slot
);
1894 post_wait_for_fsqrt (SIM_CPU
*cpu
, INT slot
)
1896 FRV_PROFILE_STATE
*ps
= CPU_PROFILE_STATE (cpu
);
1897 int *fsqrt
= ps
->fsqrt_busy
;
1899 /* Multiple floating point square roots in the same slot need only wait 1
1901 if (fsqrt
[slot
] > 0 && 1 > ps
->post_wait
)
1904 if (TRACE_INSN_P (cpu
))
1906 sprintf (hazard_name
, "Resource hazard for square root in slot F%d:", slot
);
1912 post_wait_for_float (SIM_CPU
*cpu
, INT slot
)
1914 FRV_PROFILE_STATE
*ps
= CPU_PROFILE_STATE (cpu
);
1915 int *flt
= ps
->float_busy
;
1917 /* Multiple floating point square roots in the same slot need only wait 1
1919 if (flt
[slot
] > ps
->post_wait
)
1921 ps
->post_wait
= flt
[slot
];
1922 if (TRACE_INSN_P (cpu
))
1924 sprintf (hazard_name
, "Resource hazard for floating point unit in slot F%d:", slot
);
1930 post_wait_for_media (SIM_CPU
*cpu
, INT slot
)
1932 FRV_PROFILE_STATE
*ps
= CPU_PROFILE_STATE (cpu
);
1933 int *media
= ps
->media_busy
;
1935 /* Multiple floating point square roots in the same slot need only wait 1
1937 if (media
[slot
] > ps
->post_wait
)
1939 ps
->post_wait
= media
[slot
];
1940 if (TRACE_INSN_P (cpu
))
1942 sprintf (hazard_name
, "Resource hazard for media unit in slot M%d:", slot
);
1947 /* Print cpu-specific profile information. */
1948 #define COMMAS(n) sim_add_commas (comma_buf, sizeof (comma_buf), (n))
1951 print_cache (SIM_CPU
*cpu
, FRV_CACHE
*cache
, const char *cache_name
)
1953 SIM_DESC sd
= CPU_STATE (cpu
);
1960 sim_io_printf (sd
, " %s Cache\n\n", cache_name
);
1961 accesses
= cache
->statistics
.accesses
;
1962 sim_io_printf (sd
, " Total accesses: %s\n", COMMAS (accesses
));
1966 unsigned hits
= cache
->statistics
.hits
;
1967 sim_io_printf (sd
, " Hits: %s\n", COMMAS (hits
));
1968 rate
= (float)hits
/ accesses
;
1969 sim_io_printf (sd
, " Hit rate: %.2f%%\n", rate
* 100);
1973 sim_io_printf (sd
, " Model %s has no %s cache\n",
1974 MODEL_NAME (CPU_MODEL (cpu
)), cache_name
);
1976 sim_io_printf (sd
, "\n");
/* This table must correspond to the UNIT_ATTR table in
   opcodes/frv-desc.h.  Only the units up to UNIT_C need be
   listed since the others cannot occur after mapping.
   NOTE(review): rows other than the I* and FM* lines were lost in
   extraction and were reconstructed from that correspondence — verify
   against opcodes/frv-desc.h.  */
static char *slot_names[] =
{
  "none",
  "I0", "I1", "I01", "I2", "I3", "IALL",
  "FM0", "FM1", "FM01", "FM2", "FM3", "FMALL", "FMLOW",
  "B0", "B1", "B01",
  "C"
};
1993 print_parallel (SIM_CPU
*cpu
, int verbose
)
1995 SIM_DESC sd
= CPU_STATE (cpu
);
1996 PROFILE_DATA
*p
= CPU_PROFILE_DATA (cpu
);
1997 FRV_PROFILE_STATE
*ps
= CPU_PROFILE_STATE (cpu
);
1998 unsigned total
, vliw
;
2002 sim_io_printf (sd
, "Model %s Parallelization\n\n",
2003 MODEL_NAME (CPU_MODEL (cpu
)));
2005 total
= PROFILE_TOTAL_INSN_COUNT (p
);
2006 sim_io_printf (sd
, " Total instructions: %s\n", COMMAS (total
));
2007 vliw
= ps
->vliw_insns
;
2008 sim_io_printf (sd
, " VLIW instructions: %s\n", COMMAS (vliw
));
2009 average
= (float)total
/ vliw
;
2010 sim_io_printf (sd
, " Average VLIW length: %.2f\n", average
);
2011 average
= (float)PROFILE_MODEL_TOTAL_CYCLES (p
) / vliw
;
2012 sim_io_printf (sd
, " Cycles per VLIW instruction: %.2f\n", average
);
2013 average
= (float)total
/ PROFILE_MODEL_TOTAL_CYCLES (p
);
2014 sim_io_printf (sd
, " Instructions per cycle: %.2f\n", average
);
2020 int max_name_len
= 0;
2021 for (i
= UNIT_NIL
+ 1; i
< UNIT_NUM_UNITS
; ++i
)
2023 if (INSNS_IN_SLOT (i
))
2026 if (INSNS_IN_SLOT (i
) > max_val
)
2027 max_val
= INSNS_IN_SLOT (i
);
2028 len
= strlen (slot_names
[i
]);
2029 if (len
> max_name_len
)
2035 sim_io_printf (sd
, "\n");
2036 sim_io_printf (sd
, " Instructions per slot:\n");
2037 sim_io_printf (sd
, "\n");
2038 for (i
= UNIT_NIL
+ 1; i
< UNIT_NUM_UNITS
; ++i
)
2040 if (INSNS_IN_SLOT (i
) != 0)
2042 sim_io_printf (sd
, " %*s: %*s: ",
2043 max_name_len
, slot_names
[i
],
2044 max_val
< 10000 ? 5 : 10,
2045 COMMAS (INSNS_IN_SLOT (i
)));
2046 sim_profile_print_bar (sd
, PROFILE_HISTOGRAM_WIDTH
,
2049 sim_io_printf (sd
, "\n");
2052 } /* details to print */
2055 sim_io_printf (sd
, "\n");
2059 frv_profile_info (SIM_CPU
*cpu
, int verbose
)
2061 /* FIXME: Need to add smp support. */
2062 PROFILE_DATA
*p
= CPU_PROFILE_DATA (cpu
);
2064 #if WITH_PROFILE_PARALLEL_P
2065 if (PROFILE_FLAGS (p
) [PROFILE_PARALLEL_IDX
])
2066 print_parallel (cpu
, verbose
);
2069 #if WITH_PROFILE_CACHE_P
2070 if (PROFILE_FLAGS (p
) [PROFILE_CACHE_IDX
])
2072 SIM_DESC sd
= CPU_STATE (cpu
);
2073 sim_io_printf (sd
, "Model %s Cache Statistics\n\n",
2074 MODEL_NAME (CPU_MODEL (cpu
)));
2075 print_cache (cpu
, CPU_INSN_CACHE (cpu
), "Instruction");
2076 print_cache (cpu
, CPU_DATA_CACHE (cpu
), "Data");
2078 #endif /* WITH_PROFILE_CACHE_P */
2081 /* A hack to get registers referenced for profiling. */
2082 SI
frv_ref_SI (SI ref
) {return ref
;}
2083 #endif /* WITH_PROFILE_MODEL_P */