/* frv simulator machine independent profiling code.

   Copyright (C) 1998, 1999, 2000, 2001, 2003, 2007
   Free Software Foundation, Inc.
   Contributed by Red Hat

This file is part of the GNU simulators.

This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 2, or (at your option)
any later version.

This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
GNU General Public License for more details.

You should have received a copy of the GNU General Public License along
with this program; if not, write to the Free Software Foundation, Inc.,
59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.

*/
#define WANT_CPU
#define WANT_CPU_FRVBF

#include "sim-main.h"
#include "bfd.h"

#if WITH_PROFILE_MODEL_P

#include "profile.h"
#include "profile-fr400.h"
#include "profile-fr500.h"
#include "profile-fr550.h"

static void
reset_gr_flags (SIM_CPU *cpu, INT gr)
{
  SIM_DESC sd = CPU_STATE (cpu);
  if (STATE_ARCHITECTURE (sd)->mach == bfd_mach_fr400
      || STATE_ARCHITECTURE (sd)->mach == bfd_mach_fr450)
    fr400_reset_gr_flags (cpu, gr);
  /* Other machines have no gr flags right now.  */
}

static void
reset_fr_flags (SIM_CPU *cpu, INT fr)
{
  SIM_DESC sd = CPU_STATE (cpu);
  if (STATE_ARCHITECTURE (sd)->mach == bfd_mach_fr400
      || STATE_ARCHITECTURE (sd)->mach == bfd_mach_fr450)
    fr400_reset_fr_flags (cpu, fr);
  else if (STATE_ARCHITECTURE (sd)->mach == bfd_mach_fr500)
    fr500_reset_fr_flags (cpu, fr);
}

static void
reset_acc_flags (SIM_CPU *cpu, INT acc)
{
  SIM_DESC sd = CPU_STATE (cpu);
  if (STATE_ARCHITECTURE (sd)->mach == bfd_mach_fr400
      || STATE_ARCHITECTURE (sd)->mach == bfd_mach_fr450)
    fr400_reset_acc_flags (cpu, acc);
  /* Other machines have no acc flags right now.  */
}

static void
reset_cc_flags (SIM_CPU *cpu, INT cc)
{
  SIM_DESC sd = CPU_STATE (cpu);
  if (STATE_ARCHITECTURE (sd)->mach == bfd_mach_fr500)
    fr500_reset_cc_flags (cpu, cc);
  /* Other machines have no cc flags.  */
}

void
set_use_is_gr_complex (SIM_CPU *cpu, INT gr)
{
  if (gr != -1)
    {
      FRV_PROFILE_STATE *ps = CPU_PROFILE_STATE (cpu);
      reset_gr_flags (cpu, gr);
      ps->cur_gr_complex |= (((DI)1) << gr);
    }
}

void
set_use_not_gr_complex (SIM_CPU *cpu, INT gr)
{
  if (gr != -1)
    {
      FRV_PROFILE_STATE *ps = CPU_PROFILE_STATE (cpu);
      ps->cur_gr_complex &= ~(((DI)1) << gr);
    }
}

int
use_is_gr_complex (SIM_CPU *cpu, INT gr)
{
  if (gr != -1)
    {
      FRV_PROFILE_STATE *ps = CPU_PROFILE_STATE (cpu);
      /* The flag word is wider than int; test the bit before narrowing so
         that flags for gr32..gr63 are not truncated away.  */
      return (ps->cur_gr_complex & (((DI)1) << gr)) != 0;
    }
  return 0;
}
/* Global flag indicating whether this insn is being modeled.  */
enum FRV_INSN_MODELING model_insn = FRV_INSN_NO_MODELING;

/* Static buffer for the name of the currently most restrictive hazard.  */
static char hazard_name[100] = "";

/* Pending insn prefetch requests, one per cache pipeline; an entry is idle
   while its reqno is NO_REQNO (see run_caches).  */
FRV_INSN_FETCH_BUFFER frv_insn_fetch_buffer[]
= {
  {1, NO_REQNO}, {1, NO_REQNO} /* init with impossible address.  */
};

enum cache_request
{
  cache_load,
  cache_invalidate,
  cache_flush,
  cache_preload,
  cache_unlock
};

/* A queue of load requests from the data cache.  Used to keep track of loads
   which are still pending.  */
/* TODO -- some of these are mutually exclusive and can use a union.  */
typedef struct
{
  FRV_CACHE *cache;
  unsigned reqno;
  SI address;
  int length;
  int is_signed;
  int regnum;
  int cycles;
  int regtype;
  int lock;
  int all;
  int slot;
  int active;
  enum cache_request request;
} CACHE_QUEUE_ELEMENT;

#define CACHE_QUEUE_SIZE 64 /* TODO -- make queue dynamic */
struct
{
  unsigned reqno;
  int ix;
  CACHE_QUEUE_ELEMENT q[CACHE_QUEUE_SIZE];
} cache_queue = {0, 0};

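/* Sketch of a queue element's lifecycle, as implemented below: one of the
   request_cache_* functions appends an inactive element; at the end of the
   VLIW insn, activate_cache_requests marks it active (submitting it at once
   if its cycle count is zero); run_caches then counts the element's cycles
   down, submits it to the cache when the count reaches zero, and removes it
   once request_complete reports that the cache has satisfied it.  */
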
/* Queue a request for a load from the cache.  The load will be queued as
   'inactive' and will be requested after the given number of cycles have
   passed from the point the load is activated.  */
void
request_cache_load (SIM_CPU *cpu, INT regnum, int regtype, int cycles)
{
  CACHE_QUEUE_ELEMENT *q;
  FRV_VLIW *vliw;
  int slot;

  /* For a conditional load which was not executed, CPU_LOAD_LENGTH will be
     zero.  */
  if (CPU_LOAD_LENGTH (cpu) == 0)
    return;

  if (cache_queue.ix >= CACHE_QUEUE_SIZE)
    abort (); /* TODO: Make the queue dynamic */

  q = & cache_queue.q[cache_queue.ix];
  ++cache_queue.ix;

  q->reqno = cache_queue.reqno++;
  q->request = cache_load;
  q->cache = CPU_DATA_CACHE (cpu);
  q->address = CPU_LOAD_ADDRESS (cpu);
  q->length = CPU_LOAD_LENGTH (cpu);
  q->is_signed = CPU_LOAD_SIGNED (cpu);
  q->regnum = regnum;
  q->regtype = regtype;
  q->cycles = cycles;
  q->active = 0;

  vliw = CPU_VLIW (cpu);
  slot = vliw->next_slot - 1;
  q->slot = (*vliw->current_vliw)[slot];

  CPU_LOAD_LENGTH (cpu) = 0;
}
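
/* The load's address, length and signedness above come from the CPU_LOAD_*
   state set by the semantic code; callers such as update_GR_latency_for_load
   and update_FR_latency_for_load below pass the insn's cycle count as CYCLES
   so that the cache request is issued after the insn's last cycle.  */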

/* Queue a request to flush the cache.  The request will be queued as
   'inactive' and will be requested after the given number of cycles have
   passed from the point the request is activated.  */
void
request_cache_flush (SIM_CPU *cpu, FRV_CACHE *cache, int cycles)
{
  CACHE_QUEUE_ELEMENT *q;
  FRV_VLIW *vliw;
  int slot;

  if (cache_queue.ix >= CACHE_QUEUE_SIZE)
    abort (); /* TODO: Make the queue dynamic */

  q = & cache_queue.q[cache_queue.ix];
  ++cache_queue.ix;

  q->reqno = cache_queue.reqno++;
  q->request = cache_flush;
  q->cache = cache;
  q->address = CPU_LOAD_ADDRESS (cpu);
  q->all = CPU_PROFILE_STATE (cpu)->all_cache_entries;
  q->cycles = cycles;
  q->active = 0;

  vliw = CPU_VLIW (cpu);
  slot = vliw->next_slot - 1;
  q->slot = (*vliw->current_vliw)[slot];
}

/* Queue a request to invalidate the cache.  The request will be queued as
   'inactive' and will be requested after the given number of cycles have
   passed from the point the request is activated.  */
void
request_cache_invalidate (SIM_CPU *cpu, FRV_CACHE *cache, int cycles)
{
  CACHE_QUEUE_ELEMENT *q;
  FRV_VLIW *vliw;
  int slot;

  if (cache_queue.ix >= CACHE_QUEUE_SIZE)
    abort (); /* TODO: Make the queue dynamic */

  q = & cache_queue.q[cache_queue.ix];
  ++cache_queue.ix;

  q->reqno = cache_queue.reqno++;
  q->request = cache_invalidate;
  q->cache = cache;
  q->address = CPU_LOAD_ADDRESS (cpu);
  q->all = CPU_PROFILE_STATE (cpu)->all_cache_entries;
  q->cycles = cycles;
  q->active = 0;

  vliw = CPU_VLIW (cpu);
  slot = vliw->next_slot - 1;
  q->slot = (*vliw->current_vliw)[slot];
}

/* Queue a request to preload the cache.  The request will be queued as
   'inactive' and will be requested after the given number of cycles have
   passed from the point the request is activated.  */
void
request_cache_preload (SIM_CPU *cpu, FRV_CACHE *cache, int cycles)
{
  CACHE_QUEUE_ELEMENT *q;
  FRV_VLIW *vliw;
  int slot;

  if (cache_queue.ix >= CACHE_QUEUE_SIZE)
    abort (); /* TODO: Make the queue dynamic */

  q = & cache_queue.q[cache_queue.ix];
  ++cache_queue.ix;

  q->reqno = cache_queue.reqno++;
  q->request = cache_preload;
  q->cache = cache;
  q->address = CPU_LOAD_ADDRESS (cpu);
  q->length = CPU_LOAD_LENGTH (cpu);
  q->lock = CPU_LOAD_LOCK (cpu);
  q->cycles = cycles;
  q->active = 0;

  vliw = CPU_VLIW (cpu);
  slot = vliw->next_slot - 1;
  q->slot = (*vliw->current_vliw)[slot];

  CPU_LOAD_LENGTH (cpu) = 0;
}

/* Queue a request to unlock the cache.  The request will be queued as
   'inactive' and will be requested after the given number of cycles have
   passed from the point the request is activated.  */
void
request_cache_unlock (SIM_CPU *cpu, FRV_CACHE *cache, int cycles)
{
  CACHE_QUEUE_ELEMENT *q;
  FRV_VLIW *vliw;
  int slot;

  if (cache_queue.ix >= CACHE_QUEUE_SIZE)
    abort (); /* TODO: Make the queue dynamic */

  q = & cache_queue.q[cache_queue.ix];
  ++cache_queue.ix;

  q->reqno = cache_queue.reqno++;
  q->request = cache_unlock;
  q->cache = cache;
  q->address = CPU_LOAD_ADDRESS (cpu);
  q->cycles = cycles;
  q->active = 0;

  vliw = CPU_VLIW (cpu);
  slot = vliw->next_slot - 1;
  q->slot = (*vliw->current_vliw)[slot];
}

static void
submit_cache_request (CACHE_QUEUE_ELEMENT *q)
{
  switch (q->request)
    {
    case cache_load:
      frv_cache_request_load (q->cache, q->reqno, q->address, q->slot);
      break;
    case cache_flush:
      frv_cache_request_invalidate (q->cache, q->reqno, q->address, q->slot,
                                    q->all, 1/*flush*/);
      break;
    case cache_invalidate:
      frv_cache_request_invalidate (q->cache, q->reqno, q->address, q->slot,
                                    q->all, 0/*flush*/);
      break;
    case cache_preload:
      frv_cache_request_preload (q->cache, q->address, q->slot,
                                 q->length, q->lock);
      break;
    case cache_unlock:
      frv_cache_request_unlock (q->cache, q->address, q->slot);
      break;
    default:
      abort ();
    }
}

/* Activate all inactive load requests.  */
static void
activate_cache_requests (SIM_CPU *cpu)
{
  int i;
  for (i = 0; i < cache_queue.ix; ++i)
    {
      CACHE_QUEUE_ELEMENT *q = & cache_queue.q[i];
      if (! q->active)
        {
          q->active = 1;
          /* Submit the request now if the cycle count is zero.  */
          if (q->cycles == 0)
            submit_cache_request (q);
        }
    }
}

/* Check to see if a load is pending which affects the given register(s).  */
int
load_pending_for_register (SIM_CPU *cpu, int regnum, int words, int regtype)
{
  int i;
  for (i = 0; i < cache_queue.ix; ++i)
    {
      CACHE_QUEUE_ELEMENT *q = & cache_queue.q[i];

      /* Must be the same kind of register.  */
      if (! q->active || q->request != cache_load || q->regtype != regtype)
        continue;

      /* If the register numbers are equal, then we have a match.  */
      if (q->regnum == regnum)
        return 1; /* load pending */

      /* Check for overlap of a load with a multi-word register.  */
      if (regnum < q->regnum)
        {
          if (regnum + words > q->regnum)
            return 1;
        }
      /* Check for overlap of a multi-word load with the register.  */
      else
        {
          int data_words = (q->length + sizeof (SI) - 1) / sizeof (SI);
          if (q->regnum + data_words > regnum)
            return 1;
        }
    }

  return 0; /* no load pending */
}
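
/* For example (illustrative values): a pending 16-byte load into fr8 covers
   fr8..fr11, since data_words is (16 + 3) / 4 = 4, so a query for fr10
   reports a pending load.  */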

/* Check to see if a cache flush is pending which affects the given
   address.  */
static int
flush_pending_for_address (SIM_CPU *cpu, SI address)
{
  int line_mask = ~(CPU_DATA_CACHE (cpu)->line_size - 1);
  int i;
  for (i = 0; i < cache_queue.ix; ++i)
    {
      CACHE_QUEUE_ELEMENT *q = & cache_queue.q[i];

      /* Must be the same kind of request and active.  */
      if (! q->active || q->request != cache_flush)
        continue;

      /* If the addresses are equal, then we have a match.  */
      if ((q->address & line_mask) == (address & line_mask))
        return 1; /* flush pending */
    }

  return 0; /* no flush pending */
}

static void
remove_cache_queue_element (SIM_CPU *cpu, int i)
{
  /* If we are removing the load of an FR register, then remember which
     one(s).  */
  CACHE_QUEUE_ELEMENT q = cache_queue.q[i];

  for (--cache_queue.ix; i < cache_queue.ix; ++i)
    cache_queue.q[i] = cache_queue.q[i + 1];

  /* If we removed a load of an FR register, check to see if any other loads
     of that register are still queued.  If not, then apply the queued post
     processing time of that register to its latency.  Also apply
     1 extra cycle of latency to the register since it was a floating point
     load.  */
  if (q.request == cache_load && q.regtype != REGTYPE_NONE)
    {
      FRV_PROFILE_STATE *ps = CPU_PROFILE_STATE (cpu);
      int data_words = (q.length + sizeof (SI) - 1) / sizeof (SI);
      int j;
      for (j = 0; j < data_words; ++j)
        {
          int regnum = q.regnum + j;
          if (! load_pending_for_register (cpu, regnum, 1, q.regtype))
            {
              if (q.regtype == REGTYPE_FR)
                {
                  int *fr = ps->fr_busy;
                  fr[regnum] += 1 + ps->fr_ptime[regnum];
                  ps->fr_ptime[regnum] = 0;
                }
            }
        }
    }
}

/* Copy data from the cache buffer to the target register(s).  */
static void
copy_load_data (SIM_CPU *current_cpu, FRV_CACHE *cache, int slot,
                CACHE_QUEUE_ELEMENT *q)
{
  switch (q->length)
    {
    case 1:
      if (q->regtype == REGTYPE_FR)
        {
          if (q->is_signed)
            {
              QI value = CACHE_RETURN_DATA (cache, slot, q->address, QI, 1);
              SET_H_FR (q->regnum, value);
            }
          else
            {
              UQI value = CACHE_RETURN_DATA (cache, slot, q->address, UQI, 1);
              SET_H_FR (q->regnum, value);
            }
        }
      else
        {
          if (q->is_signed)
            {
              QI value = CACHE_RETURN_DATA (cache, slot, q->address, QI, 1);
              SET_H_GR (q->regnum, value);
            }
          else
            {
              UQI value = CACHE_RETURN_DATA (cache, slot, q->address, UQI, 1);
              SET_H_GR (q->regnum, value);
            }
        }
      break;
    case 2:
      if (q->regtype == REGTYPE_FR)
        {
          if (q->is_signed)
            {
              HI value = CACHE_RETURN_DATA (cache, slot, q->address, HI, 2);
              SET_H_FR (q->regnum, value);
            }
          else
            {
              UHI value = CACHE_RETURN_DATA (cache, slot, q->address, UHI, 2);
              SET_H_FR (q->regnum, value);
            }
        }
      else
        {
          if (q->is_signed)
            {
              HI value = CACHE_RETURN_DATA (cache, slot, q->address, HI, 2);
              SET_H_GR (q->regnum, value);
            }
          else
            {
              UHI value = CACHE_RETURN_DATA (cache, slot, q->address, UHI, 2);
              SET_H_GR (q->regnum, value);
            }
        }
      break;
    case 4:
      if (q->regtype == REGTYPE_FR)
        {
          SET_H_FR (q->regnum,
                    CACHE_RETURN_DATA (cache, slot, q->address, SF, 4));
        }
      else
        {
          SET_H_GR (q->regnum,
                    CACHE_RETURN_DATA (cache, slot, q->address, SI, 4));
        }
      break;
    case 8:
      if (q->regtype == REGTYPE_FR)
        {
          SET_H_FR_DOUBLE (q->regnum,
                           CACHE_RETURN_DATA (cache, slot, q->address, DF, 8));
        }
      else
        {
          SET_H_GR_DOUBLE (q->regnum,
                           CACHE_RETURN_DATA (cache, slot, q->address, DI, 8));
        }
      break;
    case 16:
      if (q->regtype == REGTYPE_FR)
        frvbf_h_fr_quad_set_handler (current_cpu, q->regnum,
                                     CACHE_RETURN_DATA_ADDRESS (cache, slot,
                                                                q->address,
                                                                16));
      else
        frvbf_h_gr_quad_set_handler (current_cpu, q->regnum,
                                     CACHE_RETURN_DATA_ADDRESS (cache, slot,
                                                                q->address,
                                                                16));
      break;
    default:
      abort ();
    }
}
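
/* The length cases above correspond to byte, half-word, word, double and
   quad loads.  CACHE_RETURN_DATA extracts a value of the given mode from
   the cache's return buffer; the quad case instead hands the handler the
   address of the buffered data via CACHE_RETURN_DATA_ADDRESS.  */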

static int
request_complete (SIM_CPU *cpu, CACHE_QUEUE_ELEMENT *q)
{
  FRV_CACHE* cache;
  if (! q->active || q->cycles > 0)
    return 0;

  cache = CPU_DATA_CACHE (cpu);
  switch (q->request)
    {
    case cache_load:
      /* For loads, we must wait until the data is returned from the cache.  */
      if (frv_cache_data_in_buffer (cache, 0, q->address, q->reqno))
        {
          copy_load_data (cpu, cache, 0, q);
          return 1;
        }
      if (frv_cache_data_in_buffer (cache, 1, q->address, q->reqno))
        {
          copy_load_data (cpu, cache, 1, q);
          return 1;
        }
      break;

    case cache_flush:
      /* We must wait until the data is flushed.  */
      if (frv_cache_data_flushed (cache, 0, q->address, q->reqno))
        return 1;
      if (frv_cache_data_flushed (cache, 1, q->address, q->reqno))
        return 1;
      break;

    default:
      /* All other requests are complete once they've been made.  */
      return 1;
    }

  return 0;
}
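
/* The cache is modeled with two pipelines (indices 0 and 1); a request may
   have been handled by either one, so both return buffers are polled
   above.  */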

/* Run the insn and data caches through the given number of cycles, taking
   note of load requests which are fulfilled as a result.  */
static void
run_caches (SIM_CPU *cpu, int cycles)
{
  FRV_CACHE* data_cache = CPU_DATA_CACHE (cpu);
  FRV_CACHE* insn_cache = CPU_INSN_CACHE (cpu);
  int i;
  /* For each cycle, run the caches, noting which requests have been fulfilled
     and submitting new requests on their designated cycles.  */
  for (i = 0; i < cycles; ++i)
    {
      int j;
      /* Run the caches through 1 cycle.  */
      frv_cache_run (data_cache, 1);
      frv_cache_run (insn_cache, 1);

      /* Note whether prefetched insn data has been loaded yet.  */
      for (j = LS; j < FRV_CACHE_PIPELINES; ++j)
        {
          if (frv_insn_fetch_buffer[j].reqno != NO_REQNO
              && frv_cache_data_in_buffer (insn_cache, j,
                                           frv_insn_fetch_buffer[j].address,
                                           frv_insn_fetch_buffer[j].reqno))
            frv_insn_fetch_buffer[j].reqno = NO_REQNO;
        }

      /* Check to see which requests have been satisfied and which should
         be submitted now.  */
      for (j = 0; j < cache_queue.ix; ++j)
        {
          CACHE_QUEUE_ELEMENT *q = & cache_queue.q[j];
          if (! q->active)
            continue;

          /* If a load has been satisfied, complete the operation and remove it
             from the queue.  */
          if (request_complete (cpu, q))
            {
              remove_cache_queue_element (cpu, j);
              --j;
              continue;
            }

          /* Decrease the cycle count of each queued request.
             Submit a request for each queued request whose cycle count has
             become zero.  */
          --q->cycles;
          if (q->cycles == 0)
            submit_cache_request (q);
        }
    }
}

static void
apply_latency_adjustments (SIM_CPU *cpu)
{
  FRV_PROFILE_STATE *ps = CPU_PROFILE_STATE (cpu);
  int i;
  /* Update the latencies of the registers.  */
  int *fr = ps->fr_busy;
  int *acc = ps->acc_busy;
  for (i = 0; i < 64; ++i)
    {
      if (ps->fr_busy_adjust[i] > 0)
        *fr -= ps->fr_busy_adjust[i]; /* OK if it goes negative.  */
      if (ps->acc_busy_adjust[i] > 0)
        *acc -= ps->acc_busy_adjust[i]; /* OK if it goes negative.  */
      ++fr;
      ++acc;
    }
}
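
/* fr_busy_adjust[i] and acc_busy_adjust[i] record the busy-cycle reduction
   applied by decrease_FR_busy and decrease_ACC_busy during the current VLIW
   insn; enforce_full_fr_latency and enforce_full_acc_latency store -1 there
   to suppress the adjustment (note the > 0 tests above).  */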

/* Account for the number of cycles which have just passed in the latency of
   various system elements.  Works for negative cycles too so that latency
   can be extended in the case of insn fetch latency.
   If negative or zero, then no adjustment is necessary.  */
static void
update_latencies (SIM_CPU *cpu, int cycles)
{
  FRV_PROFILE_STATE *ps = CPU_PROFILE_STATE (cpu);
  int i;
  /* Update the latencies of the registers.  */
  int *fdiv;
  int *fsqrt;
  int *idiv;
  int *flt;
  int *media;
  int *ccr;
  int *gr = ps->gr_busy;
  int *fr = ps->fr_busy;
  int *acc = ps->acc_busy;
  int *spr;
  /* This loop handles GR, FR and ACC registers.  */
  for (i = 0; i < 64; ++i)
    {
      if (*gr <= cycles)
        {
          *gr = 0;
          reset_gr_flags (cpu, i);
        }
      else
        *gr -= cycles;
      /* If the busy drops to 0, then mark the register as "not in use".  */
      if (*fr <= cycles)
        {
          int *fr_lat = ps->fr_latency + i;
          *fr = 0;
          ps->fr_busy_adjust[i] = 0;
          /* Only clear flags if this register has no target latency.  */
          if (*fr_lat == 0)
            reset_fr_flags (cpu, i);
        }
      else
        *fr -= cycles;
      /* If the busy drops to 0, then mark the register as "not in use".  */
      if (*acc <= cycles)
        {
          int *acc_lat = ps->acc_latency + i;
          *acc = 0;
          ps->acc_busy_adjust[i] = 0;
          /* Only clear flags if this register has no target latency.  */
          if (*acc_lat == 0)
            reset_acc_flags (cpu, i);
        }
      else
        *acc -= cycles;
      ++gr;
      ++fr;
      ++acc;
    }
  /* This loop handles CCR registers.  */
  ccr = ps->ccr_busy;
  for (i = 0; i < 8; ++i)
    {
      if (*ccr <= cycles)
        {
          *ccr = 0;
          reset_cc_flags (cpu, i);
        }
      else
        *ccr -= cycles;
      ++ccr;
    }
  /* This loop handles SPR registers.  */
  spr = ps->spr_busy;
  for (i = 0; i < 4096; ++i)
    {
      if (*spr <= cycles)
        *spr = 0;
      else
        *spr -= cycles;
      ++spr;
    }
  /* This loop handles resources.  */
  idiv = ps->idiv_busy;
  fdiv = ps->fdiv_busy;
  fsqrt = ps->fsqrt_busy;
  for (i = 0; i < 2; ++i)
    {
      *idiv = (*idiv <= cycles) ? 0 : (*idiv - cycles);
      *fdiv = (*fdiv <= cycles) ? 0 : (*fdiv - cycles);
      *fsqrt = (*fsqrt <= cycles) ? 0 : (*fsqrt - cycles);
      ++idiv;
      ++fdiv;
      ++fsqrt;
    }
  /* Float and media units can occur in 4 slots on some machines.  */
  flt = ps->float_busy;
  media = ps->media_busy;
  for (i = 0; i < 4; ++i)
    {
      *flt = (*flt <= cycles) ? 0 : (*flt - cycles);
      *media = (*media <= cycles) ? 0 : (*media - cycles);
      ++flt;
      ++media;
    }
}

/* Print information about the wait for the given number of cycles.  */
void
frv_model_trace_wait_cycles (SIM_CPU *cpu, int cycles, const char *hazard_name)
{
  if (TRACE_INSN_P (cpu) && cycles > 0)
    {
      SIM_DESC sd = CPU_STATE (cpu);
      trace_printf (sd, cpu, "**** %s wait %d cycles ***\n",
                    hazard_name, cycles);
    }
}

void
trace_vliw_wait_cycles (SIM_CPU *cpu)
{
  if (TRACE_INSN_P (cpu))
    {
      FRV_PROFILE_STATE *ps = CPU_PROFILE_STATE (cpu);
      frv_model_trace_wait_cycles (cpu, ps->vliw_wait, hazard_name);
    }
}

/* Wait for the given number of cycles.  */
void
frv_model_advance_cycles (SIM_CPU *cpu, int cycles)
{
  PROFILE_DATA *p = CPU_PROFILE_DATA (cpu);
  update_latencies (cpu, cycles);
  run_caches (cpu, cycles);
  PROFILE_MODEL_TOTAL_CYCLES (p) += cycles;
}

void
handle_resource_wait (SIM_CPU *cpu)
{
  FRV_PROFILE_STATE *ps = CPU_PROFILE_STATE (cpu);
  if (ps->vliw_wait != 0)
    frv_model_advance_cycles (cpu, ps->vliw_wait);
  if (ps->vliw_load_stall > ps->vliw_wait)
    ps->vliw_load_stall -= ps->vliw_wait;
  else
    ps->vliw_load_stall = 0;
}
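
/* handle_resource_wait advances time by any pending vliw_wait and then
   deducts that wait from vliw_load_stall, which frvbf_model_insn_after later
   folds into PROFILE_MODEL_LOAD_STALL_CYCLES; this apparently keeps cycles
   already counted as general hazard waits from also being counted as load
   stalls.  */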

/* Account for the number of cycles until these resources will be available
   again.  */
static void
update_target_latencies (SIM_CPU *cpu)
{
  FRV_PROFILE_STATE *ps = CPU_PROFILE_STATE (cpu);
  int i;
  /* Update the latencies of the registers.  */
  int *ccr_lat;
  int *gr_lat = ps->gr_latency;
  int *fr_lat = ps->fr_latency;
  int *acc_lat = ps->acc_latency;
  int *spr_lat;
  int *ccr;
  int *gr = ps->gr_busy;
  int *fr = ps->fr_busy;
  int *acc = ps->acc_busy;
  int *spr;
  /* This loop handles GR, FR and ACC registers.  */
  for (i = 0; i < 64; ++i)
    {
      if (*gr_lat)
        {
          *gr = *gr_lat;
          *gr_lat = 0;
        }
      if (*fr_lat)
        {
          *fr = *fr_lat;
          *fr_lat = 0;
        }
      if (*acc_lat)
        {
          *acc = *acc_lat;
          *acc_lat = 0;
        }
      ++gr; ++gr_lat;
      ++fr; ++fr_lat;
      ++acc; ++acc_lat;
    }
  /* This loop handles CCR registers.  */
  ccr = ps->ccr_busy;
  ccr_lat = ps->ccr_latency;
  for (i = 0; i < 8; ++i)
    {
      if (*ccr_lat)
        {
          *ccr = *ccr_lat;
          *ccr_lat = 0;
        }
      ++ccr; ++ccr_lat;
    }
  /* This loop handles SPR registers.  */
  spr = ps->spr_busy;
  spr_lat = ps->spr_latency;
  for (i = 0; i < 4096; ++i)
    {
      if (*spr_lat)
        {
          *spr = *spr_lat;
          *spr_lat = 0;
        }
      ++spr; ++spr_lat;
    }
}
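
/* Target latencies are staged in the *_latency arrays while a VLIW insn is
   modeled and only transferred to the *_busy arrays here, at the end of the
   insn; presumably this keeps an insn from stalling on target latencies set
   within the same VLIW insn.  */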

/* Run the caches until all pending cache flushes are complete.  */
static void
wait_for_flush (SIM_CPU *cpu)
{
  SI address = CPU_LOAD_ADDRESS (cpu);
  int wait = 0;
  while (flush_pending_for_address (cpu, address))
    {
      frv_model_advance_cycles (cpu, 1);
      ++wait;
    }
  if (TRACE_INSN_P (cpu) && wait)
    {
      sprintf (hazard_name, "Data cache flush address 0x%x:", address);
      frv_model_trace_wait_cycles (cpu, wait, hazard_name);
    }
}

/* Initialize cycle counting for an insn.
   FIRST_P is non-zero if this is the first insn in a set of parallel
   insns.  */
void
frvbf_model_insn_before (SIM_CPU *cpu, int first_p)
{
  SIM_DESC sd = CPU_STATE (cpu);
  FRV_PROFILE_STATE *ps = CPU_PROFILE_STATE (cpu);

  ps->vliw_wait = 0;
  ps->post_wait = 0;
  memset (ps->fr_busy_adjust, 0, sizeof (ps->fr_busy_adjust));
  memset (ps->acc_busy_adjust, 0, sizeof (ps->acc_busy_adjust));

  if (first_p)
    {
      ps->vliw_insns++;
      ps->vliw_cycles = 0;
      ps->vliw_branch_taken = 0;
      ps->vliw_load_stall = 0;
    }

  switch (STATE_ARCHITECTURE (sd)->mach)
    {
    case bfd_mach_fr400:
    case bfd_mach_fr450:
      fr400_model_insn_before (cpu, first_p);
      break;
    case bfd_mach_fr500:
      fr500_model_insn_before (cpu, first_p);
      break;
    case bfd_mach_fr550:
      fr550_model_insn_before (cpu, first_p);
      break;
    default:
      break;
    }

  if (first_p)
    wait_for_flush (cpu);
}

/* Record the cycles computed for an insn.
   LAST_P is non-zero if this is the last insn in a set of parallel insns,
   and we update the total cycle count.
   CYCLES is the cycle count of the insn.  */

void
frvbf_model_insn_after (SIM_CPU *cpu, int last_p, int cycles)
{
  PROFILE_DATA *p = CPU_PROFILE_DATA (cpu);
  FRV_PROFILE_STATE *ps = CPU_PROFILE_STATE (cpu);
  SIM_DESC sd = CPU_STATE (cpu);

  PROFILE_MODEL_CUR_INSN_CYCLES (p) = cycles;

  /* The number of cycles for a VLIW insn is the maximum number of cycles
     used by any individual insn within it.  */
  if (cycles > ps->vliw_cycles)
    ps->vliw_cycles = cycles;

  if (last_p)
    {
      /* This is the last insn in a VLIW insn.  */
      struct frv_interrupt_timer *timer = & frv_interrupt_state.timer;

      activate_cache_requests (cpu); /* before advancing cycles.  */
      apply_latency_adjustments (cpu); /* must go first.  */
      update_target_latencies (cpu); /* must go next.  */
      frv_model_advance_cycles (cpu, ps->vliw_cycles);

      PROFILE_MODEL_LOAD_STALL_CYCLES (p) += ps->vliw_load_stall;

      /* Check the interrupt timer.  cycles contains the total cycle count.  */
      if (timer->enabled)
        {
          cycles = PROFILE_MODEL_TOTAL_CYCLES (p);
          if (timer->current % timer->value
              + (cycles - timer->current) >= timer->value)
            frv_queue_external_interrupt (cpu, timer->interrupt);
          timer->current = cycles;
        }

      ps->past_first_p = 0; /* Next one will be the first in a new VLIW.  */
      ps->branch_address = -1;
    }
  else
    ps->past_first_p = 1;

  switch (STATE_ARCHITECTURE (sd)->mach)
    {
    case bfd_mach_fr400:
    case bfd_mach_fr450:
      fr400_model_insn_after (cpu, last_p, cycles);
      break;
    case bfd_mach_fr500:
      fr500_model_insn_after (cpu, last_p, cycles);
      break;
    case bfd_mach_fr550:
      fr550_model_insn_after (cpu, last_p, cycles);
      break;
    default:
      break;
    }
}
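
/* Interrupt-timer example (hypothetical values): with timer->value == 100
   and timer->current == 250, advancing the total cycle count to 310 gives
   250 % 100 + (310 - 250) == 110 >= 100, so the interrupt is queued;
   advancing only to 260 gives 50 + 10 == 60, and no interrupt is queued.  */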

USI
frvbf_model_branch (SIM_CPU *current_cpu, PCADDR target, int hint)
{
  /* Record the hint and branch address for use in profiling.  */
  FRV_PROFILE_STATE *ps = CPU_PROFILE_STATE (current_cpu);
  ps->branch_hint = hint;
  ps->branch_address = target;
}

/* Top up the latency of the given GR by the given number of cycles.  */
void
update_GR_latency (SIM_CPU *cpu, INT out_GR, int cycles)
{
  if (out_GR >= 0)
    {
      FRV_PROFILE_STATE *ps = CPU_PROFILE_STATE (cpu);
      int *gr = ps->gr_latency;
      if (gr[out_GR] < cycles)
        gr[out_GR] = cycles;
    }
}

void
decrease_GR_busy (SIM_CPU *cpu, INT in_GR, int cycles)
{
  if (in_GR >= 0)
    {
      FRV_PROFILE_STATE *ps = CPU_PROFILE_STATE (cpu);
      int *gr = ps->gr_busy;
      gr[in_GR] -= cycles;
    }
}

/* Top up the latency of the given double GR by the number of cycles.  */
void
update_GRdouble_latency (SIM_CPU *cpu, INT out_GR, int cycles)
{
  if (out_GR >= 0)
    {
      FRV_PROFILE_STATE *ps = CPU_PROFILE_STATE (cpu);
      int *gr = ps->gr_latency;
      if (gr[out_GR] < cycles)
        gr[out_GR] = cycles;
      if (out_GR < 63 && gr[out_GR + 1] < cycles)
        gr[out_GR + 1] = cycles;
    }
}

void
update_GR_latency_for_load (SIM_CPU *cpu, INT out_GR, int cycles)
{
  if (out_GR >= 0)
    {
      FRV_PROFILE_STATE *ps = CPU_PROFILE_STATE (cpu);
      int *gr = ps->gr_latency;

      /* The latency of the GR will be at least the number of cycles used
         by the insn.  */
      if (gr[out_GR] < cycles)
        gr[out_GR] = cycles;

      /* The latency will also depend on how long it takes to retrieve the
         data from the cache or memory.  Assume that the load is issued
         after the last cycle of the insn.  */
      request_cache_load (cpu, out_GR, REGTYPE_NONE, cycles);
    }
}

void
update_GRdouble_latency_for_load (SIM_CPU *cpu, INT out_GR, int cycles)
{
  if (out_GR >= 0)
    {
      FRV_PROFILE_STATE *ps = CPU_PROFILE_STATE (cpu);
      int *gr = ps->gr_latency;

      /* The latency of the GR will be at least the number of cycles used
         by the insn.  */
      if (gr[out_GR] < cycles)
        gr[out_GR] = cycles;
      if (out_GR < 63 && gr[out_GR + 1] < cycles)
        gr[out_GR + 1] = cycles;

      /* The latency will also depend on how long it takes to retrieve the
         data from the cache or memory.  Assume that the load is issued
         after the last cycle of the insn.  */
      request_cache_load (cpu, out_GR, REGTYPE_NONE, cycles);
    }
}

void
update_GR_latency_for_swap (SIM_CPU *cpu, INT out_GR, int cycles)
{
  update_GR_latency_for_load (cpu, out_GR, cycles);
}

/* Top up the latency of the given FR by the given number of cycles.  */
void
update_FR_latency (SIM_CPU *cpu, INT out_FR, int cycles)
{
  if (out_FR >= 0)
    {
      FRV_PROFILE_STATE *ps = CPU_PROFILE_STATE (cpu);
      int *fr = ps->fr_latency;
      if (fr[out_FR] < cycles)
        fr[out_FR] = cycles;
    }
}

/* Top up the latency of the given double FR by the number of cycles.  */
void
update_FRdouble_latency (SIM_CPU *cpu, INT out_FR, int cycles)
{
  if (out_FR >= 0)
    {
      FRV_PROFILE_STATE *ps = CPU_PROFILE_STATE (cpu);
      int *fr = ps->fr_latency;
      if (fr[out_FR] < cycles)
        fr[out_FR] = cycles;
      if (out_FR < 63 && fr[out_FR + 1] < cycles)
        fr[out_FR + 1] = cycles;
    }
}

void
update_FR_latency_for_load (SIM_CPU *cpu, INT out_FR, int cycles)
{
  if (out_FR >= 0)
    {
      FRV_PROFILE_STATE *ps = CPU_PROFILE_STATE (cpu);
      int *fr = ps->fr_latency;

      /* The latency of the FR will be at least the number of cycles used
         by the insn.  */
      if (fr[out_FR] < cycles)
        fr[out_FR] = cycles;

      /* The latency will also depend on how long it takes to retrieve the
         data from the cache or memory.  Assume that the load is issued
         after the last cycle of the insn.  */
      request_cache_load (cpu, out_FR, REGTYPE_FR, cycles);
    }
}

void
update_FRdouble_latency_for_load (SIM_CPU *cpu, INT out_FR, int cycles)
{
  if (out_FR >= 0)
    {
      FRV_PROFILE_STATE *ps = CPU_PROFILE_STATE (cpu);
      int *fr = ps->fr_latency;

      /* The latency of the FR will be at least the number of cycles used
         by the insn.  */
      if (fr[out_FR] < cycles)
        fr[out_FR] = cycles;
      if (out_FR < 63 && fr[out_FR + 1] < cycles)
        fr[out_FR + 1] = cycles;

      /* The latency will also depend on how long it takes to retrieve the
         data from the cache or memory.  Assume that the load is issued
         after the last cycle of the insn.  */
      request_cache_load (cpu, out_FR, REGTYPE_FR, cycles);
    }
}

/* Top up the post-processing time of the given FR by the given number of
   cycles.  */
void
update_FR_ptime (SIM_CPU *cpu, INT out_FR, int cycles)
{
  if (out_FR >= 0)
    {
      FRV_PROFILE_STATE *ps = CPU_PROFILE_STATE (cpu);
      /* If a load is pending on this register, then add the cycles to
         the post processing time for this register.  Otherwise apply it
         directly to the latency of the register.  */
      if (! load_pending_for_register (cpu, out_FR, 1, REGTYPE_FR))
        {
          int *fr = ps->fr_latency;
          fr[out_FR] += cycles;
        }
      else
        ps->fr_ptime[out_FR] += cycles;
    }
}

void
update_FRdouble_ptime (SIM_CPU *cpu, INT out_FR, int cycles)
{
  if (out_FR >= 0)
    {
      FRV_PROFILE_STATE *ps = CPU_PROFILE_STATE (cpu);
      /* If a load is pending on this register, then add the cycles to
         the post processing time for this register.  Otherwise apply it
         directly to the latency of the register.  */
      if (! load_pending_for_register (cpu, out_FR, 2, REGTYPE_FR))
        {
          int *fr = ps->fr_latency;
          fr[out_FR] += cycles;
          if (out_FR < 63)
            fr[out_FR + 1] += cycles;
        }
      else
        {
          ps->fr_ptime[out_FR] += cycles;
          if (out_FR < 63)
            ps->fr_ptime[out_FR + 1] += cycles;
        }
    }
}

/* Top up the post-processing time of the given ACC by the given number of
   cycles.  */
void
update_ACC_ptime (SIM_CPU *cpu, INT out_ACC, int cycles)
{
  if (out_ACC >= 0)
    {
      FRV_PROFILE_STATE *ps = CPU_PROFILE_STATE (cpu);
      /* No load can be pending on this register.  Apply the cycles
         directly to the latency of the register.  */
      int *acc = ps->acc_latency;
      acc[out_ACC] += cycles;
    }
}

/* Top up the post-processing time of the given SPR by the given number of
   cycles.  */
void
update_SPR_ptime (SIM_CPU *cpu, INT out_SPR, int cycles)
{
  if (out_SPR >= 0)
    {
      FRV_PROFILE_STATE *ps = CPU_PROFILE_STATE (cpu);
      /* No load can be pending on this register.  Apply the cycles
         directly to the latency of the register.  */
      int *spr = ps->spr_latency;
      spr[out_SPR] += cycles;
    }
}

void
decrease_ACC_busy (SIM_CPU *cpu, INT out_ACC, int cycles)
{
  if (out_ACC >= 0)
    {
      FRV_PROFILE_STATE *ps = CPU_PROFILE_STATE (cpu);
      int *acc = ps->acc_busy;
      acc[out_ACC] -= cycles;
      if (ps->acc_busy_adjust[out_ACC] >= 0
          && cycles > ps->acc_busy_adjust[out_ACC])
        ps->acc_busy_adjust[out_ACC] = cycles;
    }
}

void
increase_ACC_busy (SIM_CPU *cpu, INT out_ACC, int cycles)
{
  if (out_ACC >= 0)
    {
      FRV_PROFILE_STATE *ps = CPU_PROFILE_STATE (cpu);
      int *acc = ps->acc_busy;
      acc[out_ACC] += cycles;
    }
}

void
enforce_full_acc_latency (SIM_CPU *cpu, INT in_ACC)
{
  FRV_PROFILE_STATE *ps = CPU_PROFILE_STATE (cpu);
  ps->acc_busy_adjust [in_ACC] = -1;
}
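
/* A busy-adjust entry of -1 is a sentinel: decrease_ACC_busy and
   decrease_FR_busy only record an adjustment while the entry is still >= 0,
   so enforce_full_acc_latency (above) and enforce_full_fr_latency pin the
   register to its full latency for the rest of the VLIW insn.  */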

void
decrease_FR_busy (SIM_CPU *cpu, INT out_FR, int cycles)
{
  if (out_FR >= 0)
    {
      FRV_PROFILE_STATE *ps = CPU_PROFILE_STATE (cpu);
      int *fr = ps->fr_busy;
      fr[out_FR] -= cycles;
      if (ps->fr_busy_adjust[out_FR] >= 0
          && cycles > ps->fr_busy_adjust[out_FR])
        ps->fr_busy_adjust[out_FR] = cycles;
    }
}

void
increase_FR_busy (SIM_CPU *cpu, INT out_FR, int cycles)
{
  if (out_FR >= 0)
    {
      FRV_PROFILE_STATE *ps = CPU_PROFILE_STATE (cpu);
      int *fr = ps->fr_busy;
      fr[out_FR] += cycles;
    }
}

/* Top up the latency of the given ACC by the given number of cycles.  */
void
update_ACC_latency (SIM_CPU *cpu, INT out_ACC, int cycles)
{
  if (out_ACC >= 0)
    {
      FRV_PROFILE_STATE *ps = CPU_PROFILE_STATE (cpu);
      int *acc = ps->acc_latency;
      if (acc[out_ACC] < cycles)
        acc[out_ACC] = cycles;
    }
}

/* Top up the latency of the given CCR by the given number of cycles.  */
void
update_CCR_latency (SIM_CPU *cpu, INT out_CCR, int cycles)
{
  if (out_CCR >= 0)
    {
      FRV_PROFILE_STATE *ps = CPU_PROFILE_STATE (cpu);
      int *ccr = ps->ccr_latency;
      if (ccr[out_CCR] < cycles)
        ccr[out_CCR] = cycles;
    }
}

/* Top up the latency of the given SPR by the given number of cycles.  */
void
update_SPR_latency (SIM_CPU *cpu, INT out_SPR, int cycles)
{
  if (out_SPR >= 0)
    {
      FRV_PROFILE_STATE *ps = CPU_PROFILE_STATE (cpu);
      int *spr = ps->spr_latency;
      if (spr[out_SPR] < cycles)
        spr[out_SPR] = cycles;
    }
}

/* Set the latency of the given integer division resource to the given
   number of cycles.  */
void
update_idiv_resource_latency (SIM_CPU *cpu, INT in_resource, int cycles)
{
  /* Operate directly on the busy cycles since each resource can only
     be used once in a VLIW insn.  */
  FRV_PROFILE_STATE *ps = CPU_PROFILE_STATE (cpu);
  int *r = ps->idiv_busy;
  r[in_resource] = cycles;
}

/* Set the latency of the given resource to the given number of cycles.  */
void
update_fdiv_resource_latency (SIM_CPU *cpu, INT in_resource, int cycles)
{
  /* Operate directly on the busy cycles since each resource can only
     be used once in a VLIW insn.  */
  FRV_PROFILE_STATE *ps = CPU_PROFILE_STATE (cpu);
  int *r = ps->fdiv_busy;
  r[in_resource] = cycles;
}

/* Set the latency of the given resource to the given number of cycles.  */
void
update_fsqrt_resource_latency (SIM_CPU *cpu, INT in_resource, int cycles)
{
  /* Operate directly on the busy cycles since each resource can only
     be used once in a VLIW insn.  */
  FRV_PROFILE_STATE *ps = CPU_PROFILE_STATE (cpu);
  int *r = ps->fsqrt_busy;
  r[in_resource] = cycles;
}

/* Set the latency of the given resource to the given number of cycles.  */
void
update_float_resource_latency (SIM_CPU *cpu, INT in_resource, int cycles)
{
  /* Operate directly on the busy cycles since each resource can only
     be used once in a VLIW insn.  */
  FRV_PROFILE_STATE *ps = CPU_PROFILE_STATE (cpu);
  int *r = ps->float_busy;
  r[in_resource] = cycles;
}

void
update_media_resource_latency (SIM_CPU *cpu, INT in_resource, int cycles)
{
  /* Operate directly on the busy cycles since each resource can only
     be used once in a VLIW insn.  */
  FRV_PROFILE_STATE *ps = CPU_PROFILE_STATE (cpu);
  int *r = ps->media_busy;
  r[in_resource] = cycles;
}

/* Set the branch penalty to the given number of cycles.  */
void
update_branch_penalty (SIM_CPU *cpu, int cycles)
{
  /* Operate directly on the busy cycles since only one branch can occur
     in a VLIW insn.  */
  FRV_PROFILE_STATE *ps = CPU_PROFILE_STATE (cpu);
  ps->branch_penalty = cycles;
}

/* Check the availability of the given GR register and update the number
   of cycles the current VLIW insn must wait until it is available.  */
void
vliw_wait_for_GR (SIM_CPU *cpu, INT in_GR)
{
  FRV_PROFILE_STATE *ps = CPU_PROFILE_STATE (cpu);
  int *gr = ps->gr_busy;
  /* If the latency of the register is greater than the current wait
     then update the current wait.  */
  if (in_GR >= 0 && gr[in_GR] > ps->vliw_wait)
    {
      if (TRACE_INSN_P (cpu))
        sprintf (hazard_name, "Data hazard for gr%d:", in_GR);
      ps->vliw_wait = gr[in_GR];
    }
}

/* Check the availability of the given double GR register and update the
   number of cycles the current VLIW insn must wait until it is available.  */
void
vliw_wait_for_GRdouble (SIM_CPU *cpu, INT in_GR)
{
  FRV_PROFILE_STATE *ps = CPU_PROFILE_STATE (cpu);
  int *gr = ps->gr_busy;
  /* If the latency of the register is greater than the current wait
     then update the current wait.  */
  if (in_GR >= 0)
    {
      if (gr[in_GR] > ps->vliw_wait)
        {
          if (TRACE_INSN_P (cpu))
            sprintf (hazard_name, "Data hazard for gr%d:", in_GR);
          ps->vliw_wait = gr[in_GR];
        }
      if (in_GR < 63 && gr[in_GR + 1] > ps->vliw_wait)
        {
          if (TRACE_INSN_P (cpu))
            sprintf (hazard_name, "Data hazard for gr%d:", in_GR + 1);
          ps->vliw_wait = gr[in_GR + 1];
        }
    }
}

/* Check the availability of the given FR register and update the number
   of cycles the current VLIW insn must wait until it is available.  */
void
vliw_wait_for_FR (SIM_CPU *cpu, INT in_FR)
{
  FRV_PROFILE_STATE *ps = CPU_PROFILE_STATE (cpu);
  int *fr = ps->fr_busy;
  /* If the latency of the register is greater than the current wait
     then update the current wait.  */
  if (in_FR >= 0 && fr[in_FR] > ps->vliw_wait)
    {
      if (TRACE_INSN_P (cpu))
        sprintf (hazard_name, "Data hazard for fr%d:", in_FR);
      ps->vliw_wait = fr[in_FR];
    }
}

/* Check the availability of the given double FR register and update the
   number of cycles the current VLIW insn must wait until it is available.  */
void
vliw_wait_for_FRdouble (SIM_CPU *cpu, INT in_FR)
{
  FRV_PROFILE_STATE *ps = CPU_PROFILE_STATE (cpu);
  int *fr = ps->fr_busy;
  /* If the latency of the register is greater than the current wait
     then update the current wait.  */
  if (in_FR >= 0)
    {
      if (fr[in_FR] > ps->vliw_wait)
        {
          if (TRACE_INSN_P (cpu))
            sprintf (hazard_name, "Data hazard for fr%d:", in_FR);
          ps->vliw_wait = fr[in_FR];
        }
      if (in_FR < 63 && fr[in_FR + 1] > ps->vliw_wait)
        {
          if (TRACE_INSN_P (cpu))
            sprintf (hazard_name, "Data hazard for fr%d:", in_FR + 1);
          ps->vliw_wait = fr[in_FR + 1];
        }
    }
}

/* Check the availability of the given CCR register and update the number
   of cycles the current VLIW insn must wait until it is available.  */
void
vliw_wait_for_CCR (SIM_CPU *cpu, INT in_CCR)
{
  FRV_PROFILE_STATE *ps = CPU_PROFILE_STATE (cpu);
  int *ccr = ps->ccr_busy;
  /* If the latency of the register is greater than the current wait
     then update the current wait.  */
  if (in_CCR >= 0 && ccr[in_CCR] > ps->vliw_wait)
    {
      if (TRACE_INSN_P (cpu))
        {
          if (in_CCR > 3)
            sprintf (hazard_name, "Data hazard for icc%d:", in_CCR-4);
          else
            sprintf (hazard_name, "Data hazard for fcc%d:", in_CCR);
        }
      ps->vliw_wait = ccr[in_CCR];
    }
}

/* Check the availability of the given ACC register and update the number
   of cycles the current VLIW insn must wait until it is available.  */
void
vliw_wait_for_ACC (SIM_CPU *cpu, INT in_ACC)
{
  FRV_PROFILE_STATE *ps = CPU_PROFILE_STATE (cpu);
  int *acc = ps->acc_busy;
  /* If the latency of the register is greater than the current wait
     then update the current wait.  */
  if (in_ACC >= 0 && acc[in_ACC] > ps->vliw_wait)
    {
      if (TRACE_INSN_P (cpu))
        sprintf (hazard_name, "Data hazard for acc%d:", in_ACC);
      ps->vliw_wait = acc[in_ACC];
    }
}

/* Check the availability of the given SPR register and update the number
   of cycles the current VLIW insn must wait until it is available.  */
void
vliw_wait_for_SPR (SIM_CPU *cpu, INT in_SPR)
{
  FRV_PROFILE_STATE *ps = CPU_PROFILE_STATE (cpu);
  int *spr = ps->spr_busy;
  /* If the latency of the register is greater than the current wait
     then update the current wait.  */
  if (in_SPR >= 0 && spr[in_SPR] > ps->vliw_wait)
    {
      if (TRACE_INSN_P (cpu))
        sprintf (hazard_name, "Data hazard for spr %d:", in_SPR);
      ps->vliw_wait = spr[in_SPR];
    }
}

/* Check the availability of the given integer division resource and update
   the number of cycles the current VLIW insn must wait until it is
   available.  */
void
vliw_wait_for_idiv_resource (SIM_CPU *cpu, INT in_resource)
{
  FRV_PROFILE_STATE *ps = CPU_PROFILE_STATE (cpu);
  int *r = ps->idiv_busy;
  /* If the latency of the resource is greater than the current wait
     then update the current wait.  */
  if (r[in_resource] > ps->vliw_wait)
    {
      if (TRACE_INSN_P (cpu))
        {
          sprintf (hazard_name, "Resource hazard for integer division in slot I%d:", in_resource);
        }
      ps->vliw_wait = r[in_resource];
    }
}

/* Check the availability of the given float division resource and update
   the number of cycles the current VLIW insn must wait until it is
   available.  */
void
vliw_wait_for_fdiv_resource (SIM_CPU *cpu, INT in_resource)
{
  FRV_PROFILE_STATE *ps = CPU_PROFILE_STATE (cpu);
  int *r = ps->fdiv_busy;
  /* If the latency of the resource is greater than the current wait
     then update the current wait.  */
  if (r[in_resource] > ps->vliw_wait)
    {
      if (TRACE_INSN_P (cpu))
        {
          sprintf (hazard_name, "Resource hazard for floating point division in slot F%d:", in_resource);
        }
      ps->vliw_wait = r[in_resource];
    }
}

/* Check the availability of the given float square root resource and update
   the number of cycles the current VLIW insn must wait until it is
   available.  */
void
vliw_wait_for_fsqrt_resource (SIM_CPU *cpu, INT in_resource)
{
  FRV_PROFILE_STATE *ps = CPU_PROFILE_STATE (cpu);
  int *r = ps->fsqrt_busy;
  /* If the latency of the resource is greater than the current wait
     then update the current wait.  */
  if (r[in_resource] > ps->vliw_wait)
    {
      if (TRACE_INSN_P (cpu))
        {
          sprintf (hazard_name, "Resource hazard for square root in slot F%d:", in_resource);
        }
      ps->vliw_wait = r[in_resource];
    }
}

/* Check the availability of the given float unit resource and update
   the number of cycles the current VLIW insn must wait until it is
   available.  */
void
vliw_wait_for_float_resource (SIM_CPU *cpu, INT in_resource)
{
  FRV_PROFILE_STATE *ps = CPU_PROFILE_STATE (cpu);
  int *r = ps->float_busy;
  /* If the latency of the resource is greater than the current wait
     then update the current wait.  */
  if (r[in_resource] > ps->vliw_wait)
    {
      if (TRACE_INSN_P (cpu))
        {
          sprintf (hazard_name, "Resource hazard for floating point unit in slot F%d:", in_resource);
        }
      ps->vliw_wait = r[in_resource];
    }
}

/* Check the availability of the given media unit resource and update
   the number of cycles the current VLIW insn must wait until it is
   available.  */
void
vliw_wait_for_media_resource (SIM_CPU *cpu, INT in_resource)
{
  FRV_PROFILE_STATE *ps = CPU_PROFILE_STATE (cpu);
  int *r = ps->media_busy;
  /* If the latency of the resource is greater than the current wait
     then update the current wait.  */
  if (r[in_resource] > ps->vliw_wait)
    {
      if (TRACE_INSN_P (cpu))
        {
          sprintf (hazard_name, "Resource hazard for media unit in slot M%d:", in_resource);
        }
      ps->vliw_wait = r[in_resource];
    }
}

/* Run the caches until all requests for the given register(s) are
   satisfied.  */
void
load_wait_for_GR (SIM_CPU *cpu, INT in_GR)
{
  if (in_GR >= 0)
    {
      int wait = 0;
      while (load_pending_for_register (cpu, in_GR, 1/*words*/, REGTYPE_NONE))
        {
          frv_model_advance_cycles (cpu, 1);
          ++wait;
        }
      if (wait)
        {
          FRV_PROFILE_STATE *ps = CPU_PROFILE_STATE (cpu);
          ps->vliw_wait += wait;
          ps->vliw_load_stall += wait;
          if (TRACE_INSN_P (cpu))
            sprintf (hazard_name, "Data hazard for gr%d:", in_GR);
        }
    }
}

void
load_wait_for_FR (SIM_CPU *cpu, INT in_FR)
{
  if (in_FR >= 0)
    {
      FRV_PROFILE_STATE *ps = CPU_PROFILE_STATE (cpu);
      int *fr;
      int wait = 0;
      while (load_pending_for_register (cpu, in_FR, 1/*words*/, REGTYPE_FR))
        {
          frv_model_advance_cycles (cpu, 1);
          ++wait;
        }
      /* Post processing time may have been added to the register's
         latency after the loads were processed.  Account for that too.  */
      fr = ps->fr_busy;
      if (fr[in_FR])
        {
          wait += fr[in_FR];
          frv_model_advance_cycles (cpu, fr[in_FR]);
        }
      /* Update the vliw_wait with the number of cycles we waited for the
         load and any post-processing.  */
      if (wait)
        {
          ps->vliw_wait += wait;
          ps->vliw_load_stall += wait;
          if (TRACE_INSN_P (cpu))
            sprintf (hazard_name, "Data hazard for fr%d:", in_FR);
        }
    }
}

void
load_wait_for_GRdouble (SIM_CPU *cpu, INT in_GR)
{
  if (in_GR >= 0)
    {
      int wait = 0;
      while (load_pending_for_register (cpu, in_GR, 2/*words*/, REGTYPE_NONE))
        {
          frv_model_advance_cycles (cpu, 1);
          ++wait;
        }
      if (wait)
        {
          FRV_PROFILE_STATE *ps = CPU_PROFILE_STATE (cpu);
          ps->vliw_wait += wait;
          ps->vliw_load_stall += wait;
          if (TRACE_INSN_P (cpu))
            sprintf (hazard_name, "Data hazard for gr%d:", in_GR);
        }
    }
}

void
load_wait_for_FRdouble (SIM_CPU *cpu, INT in_FR)
{
  if (in_FR >= 0)
    {
      FRV_PROFILE_STATE *ps = CPU_PROFILE_STATE (cpu);
      int *fr;
      int wait = 0;
      while (load_pending_for_register (cpu, in_FR, 2/*words*/, REGTYPE_FR))
        {
          frv_model_advance_cycles (cpu, 1);
          ++wait;
        }
      /* Post processing time may have been added to the registers'
         latencies after the loads were processed.  Account for that too.  */
      fr = ps->fr_busy;
      if (fr[in_FR])
        {
          wait += fr[in_FR];
          frv_model_advance_cycles (cpu, fr[in_FR]);
        }
      if (in_FR < 63)
        {
          if (fr[in_FR + 1])
            {
              wait += fr[in_FR + 1];
              frv_model_advance_cycles (cpu, fr[in_FR + 1]);
            }
        }
      /* Update the vliw_wait with the number of cycles we waited for the
         load and any post-processing.  */
      if (wait)
        {
          ps->vliw_wait += wait;
          ps->vliw_load_stall += wait;
          if (TRACE_INSN_P (cpu))
            sprintf (hazard_name, "Data hazard for fr%d:", in_FR);
        }
    }
}

void
enforce_full_fr_latency (SIM_CPU *cpu, INT in_FR)
{
  FRV_PROFILE_STATE *ps = CPU_PROFILE_STATE (cpu);
  ps->fr_busy_adjust [in_FR] = -1;
}

/* Calculate how long the post processing for a floating point insn must
   wait for resources to become available.  */
void
post_wait_for_FR (SIM_CPU *cpu, INT in_FR)
{
  FRV_PROFILE_STATE *ps = CPU_PROFILE_STATE (cpu);
  int *fr = ps->fr_busy;

  if (in_FR >= 0 && fr[in_FR] > ps->post_wait)
    {
      ps->post_wait = fr[in_FR];
      if (TRACE_INSN_P (cpu))
        sprintf (hazard_name, "Data hazard for fr%d:", in_FR);
    }
}

/* Calculate how long the post processing for a floating point insn must
   wait for resources to become available.  */
void
post_wait_for_FRdouble (SIM_CPU *cpu, INT in_FR)
{
  FRV_PROFILE_STATE *ps = CPU_PROFILE_STATE (cpu);
  int *fr = ps->fr_busy;

  if (in_FR >= 0)
    {
      if (fr[in_FR] > ps->post_wait)
        {
          ps->post_wait = fr[in_FR];
          if (TRACE_INSN_P (cpu))
            sprintf (hazard_name, "Data hazard for fr%d:", in_FR);
        }
      if (in_FR < 63 && fr[in_FR + 1] > ps->post_wait)
        {
          ps->post_wait = fr[in_FR + 1];
          if (TRACE_INSN_P (cpu))
            sprintf (hazard_name, "Data hazard for fr%d:", in_FR + 1);
        }
    }
}

void
post_wait_for_ACC (SIM_CPU *cpu, INT in_ACC)
{
  FRV_PROFILE_STATE *ps = CPU_PROFILE_STATE (cpu);
  int *acc = ps->acc_busy;

  if (in_ACC >= 0 && acc[in_ACC] > ps->post_wait)
    {
      ps->post_wait = acc[in_ACC];
      if (TRACE_INSN_P (cpu))
        sprintf (hazard_name, "Data hazard for acc%d:", in_ACC);
    }
}

void
post_wait_for_CCR (SIM_CPU *cpu, INT in_CCR)
{
  FRV_PROFILE_STATE *ps = CPU_PROFILE_STATE (cpu);
  int *ccr = ps->ccr_busy;

  if (in_CCR >= 0 && ccr[in_CCR] > ps->post_wait)
    {
      ps->post_wait = ccr[in_CCR];
      if (TRACE_INSN_P (cpu))
        {
          if (in_CCR > 3)
            sprintf (hazard_name, "Data hazard for icc%d:", in_CCR - 4);
          else
            sprintf (hazard_name, "Data hazard for fcc%d:", in_CCR);
        }
    }
}

void
post_wait_for_SPR (SIM_CPU *cpu, INT in_SPR)
{
  FRV_PROFILE_STATE *ps = CPU_PROFILE_STATE (cpu);
  int *spr = ps->spr_busy;

  if (in_SPR >= 0 && spr[in_SPR] > ps->post_wait)
    {
      ps->post_wait = spr[in_SPR];
      if (TRACE_INSN_P (cpu))
        sprintf (hazard_name, "Data hazard for spr[%d]:", in_SPR);
    }
}

void
post_wait_for_fdiv (SIM_CPU *cpu, INT slot)
{
  FRV_PROFILE_STATE *ps = CPU_PROFILE_STATE (cpu);
  int *fdiv = ps->fdiv_busy;

  /* Multiple floating point divisions in the same slot need only wait 1
     extra cycle.  */
  if (fdiv[slot] > 0 && 1 > ps->post_wait)
    {
      ps->post_wait = 1;
      if (TRACE_INSN_P (cpu))
        {
          sprintf (hazard_name, "Resource hazard for floating point division in slot F%d:", slot);
        }
    }
}

void
post_wait_for_fsqrt (SIM_CPU *cpu, INT slot)
{
  FRV_PROFILE_STATE *ps = CPU_PROFILE_STATE (cpu);
  int *fsqrt = ps->fsqrt_busy;

  /* Multiple floating point square roots in the same slot need only wait 1
     extra cycle.  */
  if (fsqrt[slot] > 0 && 1 > ps->post_wait)
    {
      ps->post_wait = 1;
      if (TRACE_INSN_P (cpu))
        {
          sprintf (hazard_name, "Resource hazard for square root in slot F%d:", slot);
        }
    }
}

void
post_wait_for_float (SIM_CPU *cpu, INT slot)
{
  FRV_PROFILE_STATE *ps = CPU_PROFILE_STATE (cpu);
  int *flt = ps->float_busy;

  /* Wait until the floating point unit in the given slot is available.  */
  if (flt[slot] > ps->post_wait)
    {
      ps->post_wait = flt[slot];
      if (TRACE_INSN_P (cpu))
        {
          sprintf (hazard_name, "Resource hazard for floating point unit in slot F%d:", slot);
        }
    }
}

void
post_wait_for_media (SIM_CPU *cpu, INT slot)
{
  FRV_PROFILE_STATE *ps = CPU_PROFILE_STATE (cpu);
  int *media = ps->media_busy;

  /* Wait until the media unit in the given slot is available.  */
  if (media[slot] > ps->post_wait)
    {
      ps->post_wait = media[slot];
      if (TRACE_INSN_P (cpu))
        {
          sprintf (hazard_name, "Resource hazard for media unit in slot M%d:", slot);
        }
    }
}

/* Print cpu-specific profile information.  */
#define COMMAS(n) sim_add_commas (comma_buf, sizeof (comma_buf), (n))

static void
print_cache (SIM_CPU *cpu, FRV_CACHE *cache, const char *cache_name)
{
  SIM_DESC sd = CPU_STATE (cpu);

  if (cache != NULL)
    {
      char comma_buf[20];
      unsigned accesses;

      sim_io_printf (sd, "  %s Cache\n\n", cache_name);
      accesses = cache->statistics.accesses;
      sim_io_printf (sd, "    Total accesses:  %s\n", COMMAS (accesses));
      if (accesses != 0)
        {
          float rate;
          unsigned hits = cache->statistics.hits;
          sim_io_printf (sd, "    Hits:            %s\n", COMMAS (hits));
          rate = (float)hits / accesses;
          sim_io_printf (sd, "    Hit rate:        %.2f%%\n", rate * 100);
        }
    }
  else
    sim_io_printf (sd, "  Model %s has no %s cache\n",
                   MODEL_NAME (CPU_MODEL (cpu)), cache_name);

  sim_io_printf (sd, "\n");
}

/* This table must correspond to the UNIT_ATTR table in
   opcodes/frv-desc.h.  Only the units up to UNIT_C need be
   listed since the others cannot occur after mapping.  */
static char *
slot_names[] =
{
  "none",
  "I0", "I1", "I01", "I2", "I3", "IALL",
  "FM0", "FM1", "FM01", "FM2", "FM3", "FMALL", "FMLOW",
  "B0", "B1", "B01",
  "C"
};

static void
print_parallel (SIM_CPU *cpu, int verbose)
{
  SIM_DESC sd = CPU_STATE (cpu);
  PROFILE_DATA *p = CPU_PROFILE_DATA (cpu);
  FRV_PROFILE_STATE *ps = CPU_PROFILE_STATE (cpu);
  unsigned total, vliw;
  char comma_buf[20];
  float average;

  sim_io_printf (sd, "Model %s Parallelization\n\n",
                 MODEL_NAME (CPU_MODEL (cpu)));

  total = PROFILE_TOTAL_INSN_COUNT (p);
  sim_io_printf (sd, "  Total instructions:           %s\n", COMMAS (total));
  vliw = ps->vliw_insns;
  sim_io_printf (sd, "  VLIW instructions:            %s\n", COMMAS (vliw));
  average = (float)total / vliw;
  sim_io_printf (sd, "  Average VLIW length:          %.2f\n", average);
  average = (float)PROFILE_MODEL_TOTAL_CYCLES (p) / vliw;
  sim_io_printf (sd, "  Cycles per VLIW instruction:  %.2f\n", average);
  average = (float)total / PROFILE_MODEL_TOTAL_CYCLES (p);
  sim_io_printf (sd, "  Instructions per cycle:       %.2f\n", average);

  if (verbose)
    {
      int i;
      int max_val = 0;
      int max_name_len = 0;
      for (i = UNIT_NIL + 1; i < UNIT_NUM_UNITS; ++i)
        {
          if (INSNS_IN_SLOT (i))
            {
              int len;
              if (INSNS_IN_SLOT (i) > max_val)
                max_val = INSNS_IN_SLOT (i);
              len = strlen (slot_names[i]);
              if (len > max_name_len)
                max_name_len = len;
            }
        }
      if (max_val > 0)
        {
          sim_io_printf (sd, "\n");
          sim_io_printf (sd, "  Instructions per slot:\n");
          sim_io_printf (sd, "\n");
          for (i = UNIT_NIL + 1; i < UNIT_NUM_UNITS; ++i)
            {
              if (INSNS_IN_SLOT (i) != 0)
                {
                  sim_io_printf (sd, "  %*s: %*s: ",
                                 max_name_len, slot_names[i],
                                 max_val < 10000 ? 5 : 10,
                                 COMMAS (INSNS_IN_SLOT (i)));
                  sim_profile_print_bar (sd, PROFILE_HISTOGRAM_WIDTH,
                                         INSNS_IN_SLOT (i),
                                         max_val);
                  sim_io_printf (sd, "\n");
                }
            }
        } /* details to print */
    } /* verbose */

  sim_io_printf (sd, "\n");
}

void
frv_profile_info (SIM_CPU *cpu, int verbose)
{
  /* FIXME: Need to add smp support.  */
  PROFILE_DATA *p = CPU_PROFILE_DATA (cpu);

#if WITH_PROFILE_PARALLEL_P
  if (PROFILE_FLAGS (p) [PROFILE_PARALLEL_IDX])
    print_parallel (cpu, verbose);
#endif

#if WITH_PROFILE_CACHE_P
  if (PROFILE_FLAGS (p) [PROFILE_CACHE_IDX])
    {
      SIM_DESC sd = CPU_STATE (cpu);
      sim_io_printf (sd, "Model %s Cache Statistics\n\n",
                     MODEL_NAME (CPU_MODEL (cpu)));
      print_cache (cpu, CPU_INSN_CACHE (cpu), "Instruction");
      print_cache (cpu, CPU_DATA_CACHE (cpu), "Data");
    }
#endif /* WITH_PROFILE_CACHE_P */
}

/* A hack to get registers referenced for profiling.  */
SI frv_ref_SI (SI ref) {return ref;}
#endif /* WITH_PROFILE_MODEL_P */