/* frv simulator machine independent profiling code.

   Copyright (C) 1998-2021 Free Software Foundation, Inc.
   Contributed by Red Hat

   This file is part of the GNU simulators.

   This program is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License as published by
   the Free Software Foundation; either version 3 of the License, or
   (at your option) any later version.

   This program is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
   GNU General Public License for more details.

   You should have received a copy of the GNU General Public License
   along with this program.  If not, see <http://www.gnu.org/licenses/>.  */

/* This must come before any other includes.  */
#include "defs.h"

#define WANT_CPU
#define WANT_CPU_FRVBF

#include "sim-main.h"
#include "bfd.h"
#include <stdlib.h>

#if WITH_PROFILE_MODEL_P

#include "profile.h"
#include "profile-fr400.h"
#include "profile-fr500.h"
#include "profile-fr550.h"

static void
reset_gr_flags (SIM_CPU *cpu, INT gr)
{
  SIM_DESC sd = CPU_STATE (cpu);
  if (STATE_ARCHITECTURE (sd)->mach == bfd_mach_fr400
      || STATE_ARCHITECTURE (sd)->mach == bfd_mach_fr450)
    fr400_reset_gr_flags (cpu, gr);
  /* Other machines have no gr flags right now.  */
}

static void
reset_fr_flags (SIM_CPU *cpu, INT fr)
{
  SIM_DESC sd = CPU_STATE (cpu);
  if (STATE_ARCHITECTURE (sd)->mach == bfd_mach_fr400
      || STATE_ARCHITECTURE (sd)->mach == bfd_mach_fr450)
    fr400_reset_fr_flags (cpu, fr);
  else if (STATE_ARCHITECTURE (sd)->mach == bfd_mach_fr500)
    fr500_reset_fr_flags (cpu, fr);
}

static void
reset_acc_flags (SIM_CPU *cpu, INT acc)
{
  SIM_DESC sd = CPU_STATE (cpu);
  if (STATE_ARCHITECTURE (sd)->mach == bfd_mach_fr400
      || STATE_ARCHITECTURE (sd)->mach == bfd_mach_fr450)
    fr400_reset_acc_flags (cpu, acc);
  /* Other machines have no acc flags right now.  */
}

static void
reset_cc_flags (SIM_CPU *cpu, INT cc)
{
  SIM_DESC sd = CPU_STATE (cpu);
  if (STATE_ARCHITECTURE (sd)->mach == bfd_mach_fr500)
    fr500_reset_cc_flags (cpu, cc);
  /* Other machines have no cc flags.  */
}

void
set_use_is_gr_complex (SIM_CPU *cpu, INT gr)
{
  if (gr != -1)
    {
      FRV_PROFILE_STATE *ps = CPU_PROFILE_STATE (cpu);
      reset_gr_flags (cpu, gr);
      ps->cur_gr_complex |= (((DI)1) << gr);
    }
}

void
set_use_not_gr_complex (SIM_CPU *cpu, INT gr)
{
  if (gr != -1)
    {
      FRV_PROFILE_STATE *ps = CPU_PROFILE_STATE (cpu);
      ps->cur_gr_complex &= ~(((DI)1) << gr);
    }
}

int
use_is_gr_complex (SIM_CPU *cpu, INT gr)
{
  if (gr != -1)
    {
      FRV_PROFILE_STATE *ps = CPU_PROFILE_STATE (cpu);
      /* Compare against zero so that bits above the width of int are not
         truncated away by the implicit DI-to-int conversion.  */
      return (ps->cur_gr_complex & (((DI)1) << gr)) != 0;
    }
  return 0;
}
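
/* For example, set_use_is_gr_complex (cpu, 5) sets bit 5 of the 64-bit
   cur_gr_complex mask (a DI is used because there are 64 GRs), and
   use_is_gr_complex (cpu, 5) then returns non-zero until
   set_use_not_gr_complex (cpu, 5) clears the bit again.  */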

/* Global flag indicating whether the current insn is being modeled.  */
enum FRV_INSN_MODELING model_insn = FRV_INSN_NO_MODELING;

/* Static buffer for the name of the currently most restrictive hazard.  */
static char hazard_name[100] = "";

/* Buffers tracking pending insn fetch requests, one per cache pipeline.  */
FRV_INSN_FETCH_BUFFER frv_insn_fetch_buffer[]
= {
  {1, NO_REQNO}, {1, NO_REQNO} /* init with impossible address.  */
};

enum cache_request
{
  cache_load,
  cache_invalidate,
  cache_flush,
  cache_preload,
  cache_unlock
};

/* A queue of load requests from the data cache, used to keep track of loads
   that are still pending.  */
/* TODO -- some of these are mutually exclusive and can use a union.  */
typedef struct
{
  FRV_CACHE *cache;
  unsigned reqno;
  SI address;
  int length;
  int is_signed;
  int regnum;
  int cycles;
  int regtype;
  int lock;
  int all;
  int slot;
  int active;
  enum cache_request request;
} CACHE_QUEUE_ELEMENT;

#define CACHE_QUEUE_SIZE 64 /* TODO -- make queue dynamic */
struct
{
  unsigned reqno;
  int ix;
  CACHE_QUEUE_ELEMENT q[CACHE_QUEUE_SIZE];
} cache_queue = {0, 0};
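
/* Lifecycle of a queue element, as implemented below: one of the
   request_cache_* functions appends an inactive element;
   activate_cache_requests marks it active at the end of the VLIW insn;
   run_caches counts its cycle count down and calls submit_cache_request
   when the count reaches zero; finally request_complete detects completion
   and remove_cache_queue_element retires the element.  */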

/* Queue a request for a load from the cache.  The load will be queued as
   'inactive' and will be requested after the given number of cycles have
   passed from the point the load is activated.  */
void
request_cache_load (SIM_CPU *cpu, INT regnum, int regtype, int cycles)
{
  CACHE_QUEUE_ELEMENT *q;
  FRV_VLIW *vliw;
  int slot;

  /* For a conditional load which was not executed, CPU_LOAD_LENGTH will be
     zero.  */
  if (CPU_LOAD_LENGTH (cpu) == 0)
    return;

  if (cache_queue.ix >= CACHE_QUEUE_SIZE)
    abort (); /* TODO: Make the queue dynamic */

  q = & cache_queue.q[cache_queue.ix];
  ++cache_queue.ix;

  q->reqno = cache_queue.reqno++;
  q->request = cache_load;
  q->cache = CPU_DATA_CACHE (cpu);
  q->address = CPU_LOAD_ADDRESS (cpu);
  q->length = CPU_LOAD_LENGTH (cpu);
  q->is_signed = CPU_LOAD_SIGNED (cpu);
  q->regnum = regnum;
  q->regtype = regtype;
  q->cycles = cycles;
  q->active = 0;

  vliw = CPU_VLIW (cpu);
  slot = vliw->next_slot - 1;
  q->slot = (*vliw->current_vliw)[slot];

  CPU_LOAD_LENGTH (cpu) = 0;
}

/* Queue a request to flush the cache.  The request will be queued as
   'inactive' and will be requested after the given number of cycles have
   passed from the point the request is activated.  */
void
request_cache_flush (SIM_CPU *cpu, FRV_CACHE *cache, int cycles)
{
  CACHE_QUEUE_ELEMENT *q;
  FRV_VLIW *vliw;
  int slot;

  if (cache_queue.ix >= CACHE_QUEUE_SIZE)
    abort (); /* TODO: Make the queue dynamic */

  q = & cache_queue.q[cache_queue.ix];
  ++cache_queue.ix;

  q->reqno = cache_queue.reqno++;
  q->request = cache_flush;
  q->cache = cache;
  q->address = CPU_LOAD_ADDRESS (cpu);
  q->all = CPU_PROFILE_STATE (cpu)->all_cache_entries;
  q->cycles = cycles;
  q->active = 0;

  vliw = CPU_VLIW (cpu);
  slot = vliw->next_slot - 1;
  q->slot = (*vliw->current_vliw)[slot];
}

/* Queue a request to invalidate the cache.  The request will be queued as
   'inactive' and will be requested after the given number of cycles have
   passed from the point the request is activated.  */
void
request_cache_invalidate (SIM_CPU *cpu, FRV_CACHE *cache, int cycles)
{
  CACHE_QUEUE_ELEMENT *q;
  FRV_VLIW *vliw;
  int slot;

  if (cache_queue.ix >= CACHE_QUEUE_SIZE)
    abort (); /* TODO: Make the queue dynamic */

  q = & cache_queue.q[cache_queue.ix];
  ++cache_queue.ix;

  q->reqno = cache_queue.reqno++;
  q->request = cache_invalidate;
  q->cache = cache;
  q->address = CPU_LOAD_ADDRESS (cpu);
  q->all = CPU_PROFILE_STATE (cpu)->all_cache_entries;
  q->cycles = cycles;
  q->active = 0;

  vliw = CPU_VLIW (cpu);
  slot = vliw->next_slot - 1;
  q->slot = (*vliw->current_vliw)[slot];
}

/* Queue a request to preload the cache.  The request will be queued as
   'inactive' and will be requested after the given number of cycles have
   passed from the point the request is activated.  */
void
request_cache_preload (SIM_CPU *cpu, FRV_CACHE *cache, int cycles)
{
  CACHE_QUEUE_ELEMENT *q;
  FRV_VLIW *vliw;
  int slot;

  if (cache_queue.ix >= CACHE_QUEUE_SIZE)
    abort (); /* TODO: Make the queue dynamic */

  q = & cache_queue.q[cache_queue.ix];
  ++cache_queue.ix;

  q->reqno = cache_queue.reqno++;
  q->request = cache_preload;
  q->cache = cache;
  q->address = CPU_LOAD_ADDRESS (cpu);
  q->length = CPU_LOAD_LENGTH (cpu);
  q->lock = CPU_LOAD_LOCK (cpu);
  q->cycles = cycles;
  q->active = 0;

  vliw = CPU_VLIW (cpu);
  slot = vliw->next_slot - 1;
  q->slot = (*vliw->current_vliw)[slot];

  CPU_LOAD_LENGTH (cpu) = 0;
}

/* Queue a request to unlock the cache.  The request will be queued as
   'inactive' and will be requested after the given number of cycles have
   passed from the point the request is activated.  */
void
request_cache_unlock (SIM_CPU *cpu, FRV_CACHE *cache, int cycles)
{
  CACHE_QUEUE_ELEMENT *q;
  FRV_VLIW *vliw;
  int slot;

  if (cache_queue.ix >= CACHE_QUEUE_SIZE)
    abort (); /* TODO: Make the queue dynamic */

  q = & cache_queue.q[cache_queue.ix];
  ++cache_queue.ix;

  q->reqno = cache_queue.reqno++;
  q->request = cache_unlock;
  q->cache = cache;
  q->address = CPU_LOAD_ADDRESS (cpu);
  q->cycles = cycles;
  q->active = 0;

  vliw = CPU_VLIW (cpu);
  slot = vliw->next_slot - 1;
  q->slot = (*vliw->current_vliw)[slot];
}

static void
submit_cache_request (CACHE_QUEUE_ELEMENT *q)
{
  switch (q->request)
    {
    case cache_load:
      frv_cache_request_load (q->cache, q->reqno, q->address, q->slot);
      break;
    case cache_flush:
      frv_cache_request_invalidate (q->cache, q->reqno, q->address, q->slot,
                                    q->all, 1/*flush*/);
      break;
    case cache_invalidate:
      frv_cache_request_invalidate (q->cache, q->reqno, q->address, q->slot,
                                    q->all, 0/*flush*/);
      break;
    case cache_preload:
      frv_cache_request_preload (q->cache, q->address, q->slot,
                                 q->length, q->lock);
      break;
    case cache_unlock:
      frv_cache_request_unlock (q->cache, q->address, q->slot);
      break;
    default:
      abort ();
    }
}

/* Activate all inactive load requests.  */
static void
activate_cache_requests (SIM_CPU *cpu)
{
  int i;
  for (i = 0; i < cache_queue.ix; ++i)
    {
      CACHE_QUEUE_ELEMENT *q = & cache_queue.q[i];
      if (! q->active)
        {
          q->active = 1;
          /* Submit the request now if the cycle count is zero.  */
          if (q->cycles == 0)
            submit_cache_request (q);
        }
    }
}

/* Check to see if a load is pending which affects the given register(s).  */
int
load_pending_for_register (SIM_CPU *cpu, int regnum, int words, int regtype)
{
  int i;
  for (i = 0; i < cache_queue.ix; ++i)
    {
      CACHE_QUEUE_ELEMENT *q = & cache_queue.q[i];

      /* Must be the same kind of register.  */
      if (! q->active || q->request != cache_load || q->regtype != regtype)
        continue;

      /* If the register numbers are equal, then we have a match.  */
      if (q->regnum == regnum)
        return 1; /* load pending */

      /* Check for overlap of a load with a multi-word register.  */
      if (regnum < q->regnum)
        {
          if (regnum + words > q->regnum)
            return 1;
        }
      /* Check for overlap of a multi-word load with the register.  */
      else
        {
          int data_words = (q->length + sizeof (SI) - 1) / sizeof (SI);
          if (q->regnum + data_words > regnum)
            return 1;
        }
    }

  return 0; /* no load pending */
}
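
/* For example, with a pending 8-byte load into q->regnum == 4 (two SI
   words), checking regnum == 5 with words == 1 reports a hazard because
   4 + 2 > 5; conversely, checking regnum == 3 with words == 2 reports a
   hazard because 3 + 2 > 4.  */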

/* Check to see if a cache flush is pending which affects the given
   address.  */
static int
flush_pending_for_address (SIM_CPU *cpu, SI address)
{
  int line_mask = ~(CPU_DATA_CACHE (cpu)->line_size - 1);
  int i;
  for (i = 0; i < cache_queue.ix; ++i)
    {
      CACHE_QUEUE_ELEMENT *q = & cache_queue.q[i];

      /* Must be the same kind of request and active.  */
      if (! q->active || q->request != cache_flush)
        continue;

      /* If the addresses are equal, then we have a match.  */
      if ((q->address & line_mask) == (address & line_mask))
        return 1; /* flush pending */
    }

  return 0; /* no flush pending */
}
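
/* For example, with a 32-byte cache line, line_mask == ~31, so addresses
   0x1004 and 0x101f match (same line) while 0x1020 does not.  */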

static void
remove_cache_queue_element (SIM_CPU *cpu, int i)
{
  /* If we are removing the load of an FR register, then remember which
     one(s).  */
  CACHE_QUEUE_ELEMENT q = cache_queue.q[i];

  for (--cache_queue.ix; i < cache_queue.ix; ++i)
    cache_queue.q[i] = cache_queue.q[i + 1];

  /* If we removed a load of an FR register, check to see if any other loads
     of that register are still queued.  If not, then apply the queued post
     processing time of that register to its latency.  Also apply 1 extra
     cycle of latency to the register since it was a floating point load.  */
  if (q.request == cache_load && q.regtype != REGTYPE_NONE)
    {
      FRV_PROFILE_STATE *ps = CPU_PROFILE_STATE (cpu);
      int data_words = (q.length + sizeof (SI) - 1) / sizeof (SI);
      int j;
      for (j = 0; j < data_words; ++j)
        {
          int regnum = q.regnum + j;
          if (! load_pending_for_register (cpu, regnum, 1, q.regtype))
            {
              if (q.regtype == REGTYPE_FR)
                {
                  int *fr = ps->fr_busy;
                  fr[regnum] += 1 + ps->fr_ptime[regnum];
                  ps->fr_ptime[regnum] = 0;
                }
            }
        }
    }
}

/* Copy data from the cache buffer to the target register(s).  */
static void
copy_load_data (SIM_CPU *current_cpu, FRV_CACHE *cache, int slot,
                CACHE_QUEUE_ELEMENT *q)
{
  switch (q->length)
    {
    case 1:
      if (q->regtype == REGTYPE_FR)
        {
          if (q->is_signed)
            {
              QI value = CACHE_RETURN_DATA (cache, slot, q->address, QI, 1);
              SET_H_FR (q->regnum, value);
            }
          else
            {
              UQI value = CACHE_RETURN_DATA (cache, slot, q->address, UQI, 1);
              SET_H_FR (q->regnum, value);
            }
        }
      else
        {
          if (q->is_signed)
            {
              QI value = CACHE_RETURN_DATA (cache, slot, q->address, QI, 1);
              SET_H_GR (q->regnum, value);
            }
          else
            {
              UQI value = CACHE_RETURN_DATA (cache, slot, q->address, UQI, 1);
              SET_H_GR (q->regnum, value);
            }
        }
      break;
    case 2:
      if (q->regtype == REGTYPE_FR)
        {
          if (q->is_signed)
            {
              HI value = CACHE_RETURN_DATA (cache, slot, q->address, HI, 2);
              SET_H_FR (q->regnum, value);
            }
          else
            {
              UHI value = CACHE_RETURN_DATA (cache, slot, q->address, UHI, 2);
              SET_H_FR (q->regnum, value);
            }
        }
      else
        {
          if (q->is_signed)
            {
              HI value = CACHE_RETURN_DATA (cache, slot, q->address, HI, 2);
              SET_H_GR (q->regnum, value);
            }
          else
            {
              UHI value = CACHE_RETURN_DATA (cache, slot, q->address, UHI, 2);
              SET_H_GR (q->regnum, value);
            }
        }
      break;
    case 4:
      if (q->regtype == REGTYPE_FR)
        {
          SET_H_FR (q->regnum,
                    CACHE_RETURN_DATA (cache, slot, q->address, SF, 4));
        }
      else
        {
          SET_H_GR (q->regnum,
                    CACHE_RETURN_DATA (cache, slot, q->address, SI, 4));
        }
      break;
    case 8:
      if (q->regtype == REGTYPE_FR)
        {
          SET_H_FR_DOUBLE (q->regnum,
                           CACHE_RETURN_DATA (cache, slot, q->address, DF, 8));
        }
      else
        {
          SET_H_GR_DOUBLE (q->regnum,
                           CACHE_RETURN_DATA (cache, slot, q->address, DI, 8));
        }
      break;
    case 16:
      if (q->regtype == REGTYPE_FR)
        frvbf_h_fr_quad_set_handler (current_cpu, q->regnum,
                                     CACHE_RETURN_DATA_ADDRESS (cache, slot,
                                                                q->address,
                                                                16));
      else
        frvbf_h_gr_quad_set_handler (current_cpu, q->regnum,
                                     CACHE_RETURN_DATA_ADDRESS (cache, slot,
                                                                q->address,
                                                                16));
      break;
    default:
      abort ();
    }
}

static int
request_complete (SIM_CPU *cpu, CACHE_QUEUE_ELEMENT *q)
{
  FRV_CACHE* cache;
  if (! q->active || q->cycles > 0)
    return 0;

  cache = CPU_DATA_CACHE (cpu);
  switch (q->request)
    {
    case cache_load:
      /* For loads, we must wait until the data is returned from the cache.  */
      if (frv_cache_data_in_buffer (cache, 0, q->address, q->reqno))
        {
          copy_load_data (cpu, cache, 0, q);
          return 1;
        }
      if (frv_cache_data_in_buffer (cache, 1, q->address, q->reqno))
        {
          copy_load_data (cpu, cache, 1, q);
          return 1;
        }
      break;

    case cache_flush:
      /* We must wait until the data is flushed.  */
      if (frv_cache_data_flushed (cache, 0, q->address, q->reqno))
        return 1;
      if (frv_cache_data_flushed (cache, 1, q->address, q->reqno))
        return 1;
      break;

    default:
      /* All other requests are complete once they've been made.  */
      return 1;
    }

  return 0;
}

/* Run the insn and data caches through the given number of cycles, taking
   note of load requests which are fulfilled as a result.  */
static void
run_caches (SIM_CPU *cpu, int cycles)
{
  FRV_CACHE* data_cache = CPU_DATA_CACHE (cpu);
  FRV_CACHE* insn_cache = CPU_INSN_CACHE (cpu);
  int i;
  /* For each cycle, run the caches, noting which requests have been fulfilled
     and submitting new requests on their designated cycles.  */
  for (i = 0; i < cycles; ++i)
    {
      int j;
      /* Run the caches through 1 cycle.  */
      frv_cache_run (data_cache, 1);
      frv_cache_run (insn_cache, 1);

      /* Note whether prefetched insn data has been loaded yet.  */
      for (j = LS; j < FRV_CACHE_PIPELINES; ++j)
        {
          if (frv_insn_fetch_buffer[j].reqno != NO_REQNO
              && frv_cache_data_in_buffer (insn_cache, j,
                                           frv_insn_fetch_buffer[j].address,
                                           frv_insn_fetch_buffer[j].reqno))
            frv_insn_fetch_buffer[j].reqno = NO_REQNO;
        }

      /* Check to see which requests have been satisfied and which should
         be submitted now.  */
      for (j = 0; j < cache_queue.ix; ++j)
        {
          CACHE_QUEUE_ELEMENT *q = & cache_queue.q[j];
          if (! q->active)
            continue;

          /* If a request has been satisfied, complete the operation and
             remove it from the queue.  */
          if (request_complete (cpu, q))
            {
              remove_cache_queue_element (cpu, j);
              --j;
              continue;
            }

          /* Decrease the cycle count of each queued request.
             Submit a request for each queued request whose cycle count has
             become zero.  */
          --q->cycles;
          if (q->cycles == 0)
            submit_cache_request (q);
        }
    }
}

static void
apply_latency_adjustments (SIM_CPU *cpu)
{
  FRV_PROFILE_STATE *ps = CPU_PROFILE_STATE (cpu);
  int i;
  /* Update the busy counts of the registers.  */
  int *fr = ps->fr_busy;
  int *acc = ps->acc_busy;
  for (i = 0; i < 64; ++i)
    {
      if (ps->fr_busy_adjust[i] > 0)
        *fr -= ps->fr_busy_adjust[i]; /* OK if it goes negative.  */
      if (ps->acc_busy_adjust[i] > 0)
        *acc -= ps->acc_busy_adjust[i]; /* OK if it goes negative.  */
      ++fr;
      ++acc;
    }
}

/* Account for the number of cycles which have just passed in the latency of
   various system elements.  Works for negative cycles too, so that latency
   can be extended in the case of insn fetch latency.  A latency which is
   already zero or negative needs no adjustment.  */
static void
update_latencies (SIM_CPU *cpu, int cycles)
{
  FRV_PROFILE_STATE *ps = CPU_PROFILE_STATE (cpu);
  int i;
  /* Update the latencies of the registers.  */
  int *fdiv;
  int *fsqrt;
  int *idiv;
  int *flt;
  int *media;
  int *ccr;
  int *gr = ps->gr_busy;
  int *fr = ps->fr_busy;
  int *acc = ps->acc_busy;
  int *spr;
  /* This loop handles GR, FR and ACC registers.  */
  for (i = 0; i < 64; ++i)
    {
      if (*gr <= cycles)
        {
          *gr = 0;
          reset_gr_flags (cpu, i);
        }
      else
        *gr -= cycles;
      /* If the busy count drops to 0, then mark the register as
         "not in use".  */
      if (*fr <= cycles)
        {
          int *fr_lat = ps->fr_latency + i;
          *fr = 0;
          ps->fr_busy_adjust[i] = 0;
          /* Only clear flags if this register has no target latency.  */
          if (*fr_lat == 0)
            reset_fr_flags (cpu, i);
        }
      else
        *fr -= cycles;
      /* If the busy count drops to 0, then mark the register as
         "not in use".  */
      if (*acc <= cycles)
        {
          int *acc_lat = ps->acc_latency + i;
          *acc = 0;
          ps->acc_busy_adjust[i] = 0;
          /* Only clear flags if this register has no target latency.  */
          if (*acc_lat == 0)
            reset_acc_flags (cpu, i);
        }
      else
        *acc -= cycles;
      ++gr;
      ++fr;
      ++acc;
    }
  /* This loop handles CCR registers.  */
  ccr = ps->ccr_busy;
  for (i = 0; i < 8; ++i)
    {
      if (*ccr <= cycles)
        {
          *ccr = 0;
          reset_cc_flags (cpu, i);
        }
      else
        *ccr -= cycles;
      ++ccr;
    }
  /* This loop handles SPR registers.  */
  spr = ps->spr_busy;
  for (i = 0; i < 4096; ++i)
    {
      if (*spr <= cycles)
        *spr = 0;
      else
        *spr -= cycles;
      ++spr;
    }
  /* This loop handles resources.  */
  idiv = ps->idiv_busy;
  fdiv = ps->fdiv_busy;
  fsqrt = ps->fsqrt_busy;
  for (i = 0; i < 2; ++i)
    {
      *idiv = (*idiv <= cycles) ? 0 : (*idiv - cycles);
      *fdiv = (*fdiv <= cycles) ? 0 : (*fdiv - cycles);
      *fsqrt = (*fsqrt <= cycles) ? 0 : (*fsqrt - cycles);
      ++idiv;
      ++fdiv;
      ++fsqrt;
    }
  /* Float and media units can occur in 4 slots on some machines.  */
  flt = ps->float_busy;
  media = ps->media_busy;
  for (i = 0; i < 4; ++i)
    {
      *flt = (*flt <= cycles) ? 0 : (*flt - cycles);
      *media = (*media <= cycles) ? 0 : (*media - cycles);
      ++flt;
      ++media;
    }
}
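
/* For example, a GR whose busy count is 3 is zeroed (and its flags reset)
   by update_latencies (cpu, 3), but only drops to 1 by
   update_latencies (cpu, 2).  */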

/* Print information about the wait for the given number of cycles.  */
void
frv_model_trace_wait_cycles (SIM_CPU *cpu, int cycles, const char *hazard_name)
{
  if (TRACE_INSN_P (cpu) && cycles > 0)
    {
      SIM_DESC sd = CPU_STATE (cpu);
      trace_printf (sd, cpu, "**** %s wait %d cycles ***\n",
                    hazard_name, cycles);
    }
}

void
trace_vliw_wait_cycles (SIM_CPU *cpu)
{
  if (TRACE_INSN_P (cpu))
    {
      FRV_PROFILE_STATE *ps = CPU_PROFILE_STATE (cpu);
      frv_model_trace_wait_cycles (cpu, ps->vliw_wait, hazard_name);
    }
}

/* Wait for the given number of cycles.  */
void
frv_model_advance_cycles (SIM_CPU *cpu, int cycles)
{
  PROFILE_DATA *p = CPU_PROFILE_DATA (cpu);
  update_latencies (cpu, cycles);
  run_caches (cpu, cycles);
  PROFILE_MODEL_TOTAL_CYCLES (p) += cycles;
}

void
handle_resource_wait (SIM_CPU *cpu)
{
  FRV_PROFILE_STATE *ps = CPU_PROFILE_STATE (cpu);
  if (ps->vliw_wait != 0)
    frv_model_advance_cycles (cpu, ps->vliw_wait);
  if (ps->vliw_load_stall > ps->vliw_wait)
    ps->vliw_load_stall -= ps->vliw_wait;
  else
    ps->vliw_load_stall = 0;
}
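
/* Note that the vliw_wait cycles just consumed are deducted from the pending
   load-stall count, presumably so that cycles already waited out are not
   reported again when PROFILE_MODEL_LOAD_STALL_CYCLES is accumulated in
   frvbf_model_insn_after below.  */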

/* Account for the number of cycles until these resources will be available
   again.  */
static void
update_target_latencies (SIM_CPU *cpu)
{
  FRV_PROFILE_STATE *ps = CPU_PROFILE_STATE (cpu);
  int i;
  /* Update the latencies of the registers.  */
  int *ccr_lat;
  int *gr_lat = ps->gr_latency;
  int *fr_lat = ps->fr_latency;
  int *acc_lat = ps->acc_latency;
  int *spr_lat;
  int *ccr;
  int *gr = ps->gr_busy;
  int *fr = ps->fr_busy;
  int *acc = ps->acc_busy;
  int *spr;
  /* This loop handles GR, FR and ACC registers.  */
  for (i = 0; i < 64; ++i)
    {
      if (*gr_lat)
        {
          *gr = *gr_lat;
          *gr_lat = 0;
        }
      if (*fr_lat)
        {
          *fr = *fr_lat;
          *fr_lat = 0;
        }
      if (*acc_lat)
        {
          *acc = *acc_lat;
          *acc_lat = 0;
        }
      ++gr; ++gr_lat;
      ++fr; ++fr_lat;
      ++acc; ++acc_lat;
    }
  /* This loop handles CCR registers.  */
  ccr = ps->ccr_busy;
  ccr_lat = ps->ccr_latency;
  for (i = 0; i < 8; ++i)
    {
      if (*ccr_lat)
        {
          *ccr = *ccr_lat;
          *ccr_lat = 0;
        }
      ++ccr; ++ccr_lat;
    }
  /* This loop handles SPR registers.  */
  spr = ps->spr_busy;
  spr_lat = ps->spr_latency;
  for (i = 0; i < 4096; ++i)
    {
      if (*spr_lat)
        {
          *spr = *spr_lat;
          *spr_lat = 0;
        }
      ++spr; ++spr_lat;
    }
}
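
/* In other words, update_GR_latency and friends below only record a target
   latency; it is transferred to the corresponding busy counter here, once
   per VLIW insn, and the latency slot is cleared for the next insn.  */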

/* Run the caches until all pending cache flushes are complete.  */
static void
wait_for_flush (SIM_CPU *cpu)
{
  SI address = CPU_LOAD_ADDRESS (cpu);
  int wait = 0;
  while (flush_pending_for_address (cpu, address))
    {
      frv_model_advance_cycles (cpu, 1);
      ++wait;
    }
  if (TRACE_INSN_P (cpu) && wait)
    {
      /* Use 0x%x rather than %p: ADDRESS is an SI integer, not a pointer.  */
      sprintf (hazard_name, "Data cache flush address 0x%x:", address);
      frv_model_trace_wait_cycles (cpu, wait, hazard_name);
    }
}

/* Initialize cycle counting for an insn.
   FIRST_P is non-zero if this is the first insn in a set of parallel
   insns.  */
void
frvbf_model_insn_before (SIM_CPU *cpu, int first_p)
{
  SIM_DESC sd = CPU_STATE (cpu);
  FRV_PROFILE_STATE *ps = CPU_PROFILE_STATE (cpu);

  ps->vliw_wait = 0;
  ps->post_wait = 0;
  memset (ps->fr_busy_adjust, 0, sizeof (ps->fr_busy_adjust));
  memset (ps->acc_busy_adjust, 0, sizeof (ps->acc_busy_adjust));

  if (first_p)
    {
      ps->vliw_insns++;
      ps->vliw_cycles = 0;
      ps->vliw_branch_taken = 0;
      ps->vliw_load_stall = 0;
    }

  switch (STATE_ARCHITECTURE (sd)->mach)
    {
    case bfd_mach_fr400:
    case bfd_mach_fr450:
      fr400_model_insn_before (cpu, first_p);
      break;
    case bfd_mach_fr500:
      fr500_model_insn_before (cpu, first_p);
      break;
    case bfd_mach_fr550:
      fr550_model_insn_before (cpu, first_p);
      break;
    default:
      break;
    }

  if (first_p)
    wait_for_flush (cpu);
}

/* Record the cycles computed for an insn.
   LAST_P is non-zero if this is the last insn in a set of parallel insns,
   and we update the total cycle count.
   CYCLES is the cycle count of the insn.  */

void
frvbf_model_insn_after (SIM_CPU *cpu, int last_p, int cycles)
{
  PROFILE_DATA *p = CPU_PROFILE_DATA (cpu);
  FRV_PROFILE_STATE *ps = CPU_PROFILE_STATE (cpu);
  SIM_DESC sd = CPU_STATE (cpu);

  PROFILE_MODEL_CUR_INSN_CYCLES (p) = cycles;

  /* The number of cycles for a VLIW insn is the maximum number of cycles
     used by any individual insn within it.  */
  if (cycles > ps->vliw_cycles)
    ps->vliw_cycles = cycles;

  if (last_p)
    {
      /* This is the last insn in a VLIW insn.  */
      struct frv_interrupt_timer *timer = & frv_interrupt_state.timer;

      activate_cache_requests (cpu); /* before advancing cycles.  */
      apply_latency_adjustments (cpu); /* must go first.  */
      update_target_latencies (cpu); /* must go next.  */
      frv_model_advance_cycles (cpu, ps->vliw_cycles);

      PROFILE_MODEL_LOAD_STALL_CYCLES (p) += ps->vliw_load_stall;

      /* Check the interrupt timer.  CYCLES contains the total cycle count.  */
      if (timer->enabled)
        {
          cycles = PROFILE_MODEL_TOTAL_CYCLES (p);
          if (timer->current % timer->value
              + (cycles - timer->current) >= timer->value)
            frv_queue_external_interrupt (cpu, timer->interrupt);
          timer->current = cycles;
        }

      ps->past_first_p = 0; /* Next one will be the first in a new VLIW.  */
      ps->branch_address = -1;
    }
  else
    ps->past_first_p = 1;

  switch (STATE_ARCHITECTURE (sd)->mach)
    {
    case bfd_mach_fr400:
    case bfd_mach_fr450:
      fr400_model_insn_after (cpu, last_p, cycles);
      break;
    case bfd_mach_fr500:
      fr500_model_insn_after (cpu, last_p, cycles);
      break;
    case bfd_mach_fr550:
      fr550_model_insn_after (cpu, last_p, cycles);
      break;
    default:
      break;
    }
}
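
/* For example, with timer->value == 100, timer->current == 250 and a new
   total cycle count of 310, the timer test above computes
   250 % 100 + (310 - 250) == 110 >= 100: a 100-cycle boundary was crossed
   during this VLIW insn, so the external interrupt is queued.  */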

USI
frvbf_model_branch (SIM_CPU *current_cpu, PCADDR target, int hint)
{
  /* Record the hint and branch address for use in profiling.  */
  FRV_PROFILE_STATE *ps = CPU_PROFILE_STATE (current_cpu);
  ps->branch_hint = hint;
  ps->branch_address = target;
  /* Return a value so we do not fall off the end of a non-void function;
     the result is not otherwise meaningful here.  */
  return 0;
}

/* Top up the latency of the given GR by the given number of cycles.  */
void
update_GR_latency (SIM_CPU *cpu, INT out_GR, int cycles)
{
  if (out_GR >= 0)
    {
      FRV_PROFILE_STATE *ps = CPU_PROFILE_STATE (cpu);
      int *gr = ps->gr_latency;
      if (gr[out_GR] < cycles)
        gr[out_GR] = cycles;
    }
}

void
decrease_GR_busy (SIM_CPU *cpu, INT in_GR, int cycles)
{
  if (in_GR >= 0)
    {
      FRV_PROFILE_STATE *ps = CPU_PROFILE_STATE (cpu);
      int *gr = ps->gr_busy;
      gr[in_GR] -= cycles;
    }
}

/* Top up the latency of the given double GR by the given number of cycles.  */
void
update_GRdouble_latency (SIM_CPU *cpu, INT out_GR, int cycles)
{
  if (out_GR >= 0)
    {
      FRV_PROFILE_STATE *ps = CPU_PROFILE_STATE (cpu);
      int *gr = ps->gr_latency;
      if (gr[out_GR] < cycles)
        gr[out_GR] = cycles;
      if (out_GR < 63 && gr[out_GR + 1] < cycles)
        gr[out_GR + 1] = cycles;
    }
}

void
update_GR_latency_for_load (SIM_CPU *cpu, INT out_GR, int cycles)
{
  if (out_GR >= 0)
    {
      FRV_PROFILE_STATE *ps = CPU_PROFILE_STATE (cpu);
      int *gr = ps->gr_latency;

      /* The latency of the GR will be at least the number of cycles used
         by the insn.  */
      if (gr[out_GR] < cycles)
        gr[out_GR] = cycles;

      /* The latency will also depend on how long it takes to retrieve the
         data from the cache or memory.  Assume that the load is issued
         after the last cycle of the insn.  */
      request_cache_load (cpu, out_GR, REGTYPE_NONE, cycles);
    }
}

void
update_GRdouble_latency_for_load (SIM_CPU *cpu, INT out_GR, int cycles)
{
  if (out_GR >= 0)
    {
      FRV_PROFILE_STATE *ps = CPU_PROFILE_STATE (cpu);
      int *gr = ps->gr_latency;

      /* The latency of the GR will be at least the number of cycles used
         by the insn.  */
      if (gr[out_GR] < cycles)
        gr[out_GR] = cycles;
      if (out_GR < 63 && gr[out_GR + 1] < cycles)
        gr[out_GR + 1] = cycles;

      /* The latency will also depend on how long it takes to retrieve the
         data from the cache or memory.  Assume that the load is issued
         after the last cycle of the insn.  */
      request_cache_load (cpu, out_GR, REGTYPE_NONE, cycles);
    }
}

void
update_GR_latency_for_swap (SIM_CPU *cpu, INT out_GR, int cycles)
{
  update_GR_latency_for_load (cpu, out_GR, cycles);
}

/* Top up the latency of the given FR by the given number of cycles.  */
void
update_FR_latency (SIM_CPU *cpu, INT out_FR, int cycles)
{
  if (out_FR >= 0)
    {
      FRV_PROFILE_STATE *ps = CPU_PROFILE_STATE (cpu);
      int *fr = ps->fr_latency;
      if (fr[out_FR] < cycles)
        fr[out_FR] = cycles;
    }
}

/* Top up the latency of the given double FR by the given number of cycles.  */
void
update_FRdouble_latency (SIM_CPU *cpu, INT out_FR, int cycles)
{
  if (out_FR >= 0)
    {
      FRV_PROFILE_STATE *ps = CPU_PROFILE_STATE (cpu);
      int *fr = ps->fr_latency;
      if (fr[out_FR] < cycles)
        fr[out_FR] = cycles;
      if (out_FR < 63 && fr[out_FR + 1] < cycles)
        fr[out_FR + 1] = cycles;
    }
}

void
update_FR_latency_for_load (SIM_CPU *cpu, INT out_FR, int cycles)
{
  if (out_FR >= 0)
    {
      FRV_PROFILE_STATE *ps = CPU_PROFILE_STATE (cpu);
      int *fr = ps->fr_latency;

      /* The latency of the FR will be at least the number of cycles used
         by the insn.  */
      if (fr[out_FR] < cycles)
        fr[out_FR] = cycles;

      /* The latency will also depend on how long it takes to retrieve the
         data from the cache or memory.  Assume that the load is issued
         after the last cycle of the insn.  */
      request_cache_load (cpu, out_FR, REGTYPE_FR, cycles);
    }
}

void
update_FRdouble_latency_for_load (SIM_CPU *cpu, INT out_FR, int cycles)
{
  if (out_FR >= 0)
    {
      FRV_PROFILE_STATE *ps = CPU_PROFILE_STATE (cpu);
      int *fr = ps->fr_latency;

      /* The latency of the FR will be at least the number of cycles used
         by the insn.  */
      if (fr[out_FR] < cycles)
        fr[out_FR] = cycles;
      if (out_FR < 63 && fr[out_FR + 1] < cycles)
        fr[out_FR + 1] = cycles;

      /* The latency will also depend on how long it takes to retrieve the
         data from the cache or memory.  Assume that the load is issued
         after the last cycle of the insn.  */
      request_cache_load (cpu, out_FR, REGTYPE_FR, cycles);
    }
}

/* Top up the post-processing time of the given FR by the given number of
   cycles.  */
void
update_FR_ptime (SIM_CPU *cpu, INT out_FR, int cycles)
{
  if (out_FR >= 0)
    {
      FRV_PROFILE_STATE *ps = CPU_PROFILE_STATE (cpu);
      /* If a load is pending on this register, then add the cycles to
         the post processing time for this register.  Otherwise apply it
         directly to the latency of the register.  */
      if (! load_pending_for_register (cpu, out_FR, 1, REGTYPE_FR))
        {
          int *fr = ps->fr_latency;
          fr[out_FR] += cycles;
        }
      else
        ps->fr_ptime[out_FR] += cycles;
    }
}
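
/* Deferred post-processing time recorded in fr_ptime is folded back into the
   register's busy count (plus 1 extra cycle) by remove_cache_queue_element
   above, once the last pending load of that register completes.  */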

void
update_FRdouble_ptime (SIM_CPU *cpu, INT out_FR, int cycles)
{
  if (out_FR >= 0)
    {
      FRV_PROFILE_STATE *ps = CPU_PROFILE_STATE (cpu);
      /* If a load is pending on this register, then add the cycles to
         the post processing time for this register.  Otherwise apply it
         directly to the latency of the register.  */
      if (! load_pending_for_register (cpu, out_FR, 2, REGTYPE_FR))
        {
          int *fr = ps->fr_latency;
          fr[out_FR] += cycles;
          if (out_FR < 63)
            fr[out_FR + 1] += cycles;
        }
      else
        {
          ps->fr_ptime[out_FR] += cycles;
          if (out_FR < 63)
            ps->fr_ptime[out_FR + 1] += cycles;
        }
    }
}

/* Top up the post-processing time of the given ACC by the given number of
   cycles.  */
void
update_ACC_ptime (SIM_CPU *cpu, INT out_ACC, int cycles)
{
  if (out_ACC >= 0)
    {
      FRV_PROFILE_STATE *ps = CPU_PROFILE_STATE (cpu);
      /* No load can be pending on this register.  Apply the cycles
         directly to the latency of the register.  */
      int *acc = ps->acc_latency;
      acc[out_ACC] += cycles;
    }
}

/* Top up the post-processing time of the given SPR by the given number of
   cycles.  */
void
update_SPR_ptime (SIM_CPU *cpu, INT out_SPR, int cycles)
{
  if (out_SPR >= 0)
    {
      FRV_PROFILE_STATE *ps = CPU_PROFILE_STATE (cpu);
      /* No load can be pending on this register.  Apply the cycles
         directly to the latency of the register.  */
      int *spr = ps->spr_latency;
      spr[out_SPR] += cycles;
    }
}

void
decrease_ACC_busy (SIM_CPU *cpu, INT out_ACC, int cycles)
{
  if (out_ACC >= 0)
    {
      FRV_PROFILE_STATE *ps = CPU_PROFILE_STATE (cpu);
      int *acc = ps->acc_busy;
      acc[out_ACC] -= cycles;
      if (ps->acc_busy_adjust[out_ACC] >= 0
          && cycles > ps->acc_busy_adjust[out_ACC])
        ps->acc_busy_adjust[out_ACC] = cycles;
    }
}

void
increase_ACC_busy (SIM_CPU *cpu, INT out_ACC, int cycles)
{
  if (out_ACC >= 0)
    {
      FRV_PROFILE_STATE *ps = CPU_PROFILE_STATE (cpu);
      int *acc = ps->acc_busy;
      acc[out_ACC] += cycles;
    }
}

void
enforce_full_acc_latency (SIM_CPU *cpu, INT in_ACC)
{
  FRV_PROFILE_STATE *ps = CPU_PROFILE_STATE (cpu);
  ps->acc_busy_adjust [in_ACC] = -1;
}
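
/* Setting the adjustment to -1 marks the ACC so that decrease_ACC_busy
   above records no adjustment for it (it only records non-negative values)
   and apply_latency_adjustments skips it; the register therefore keeps its
   full latency.  */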

void
decrease_FR_busy (SIM_CPU *cpu, INT out_FR, int cycles)
{
  if (out_FR >= 0)
    {
      FRV_PROFILE_STATE *ps = CPU_PROFILE_STATE (cpu);
      int *fr = ps->fr_busy;
      fr[out_FR] -= cycles;
      if (ps->fr_busy_adjust[out_FR] >= 0
          && cycles > ps->fr_busy_adjust[out_FR])
        ps->fr_busy_adjust[out_FR] = cycles;
    }
}

void
increase_FR_busy (SIM_CPU *cpu, INT out_FR, int cycles)
{
  if (out_FR >= 0)
    {
      FRV_PROFILE_STATE *ps = CPU_PROFILE_STATE (cpu);
      int *fr = ps->fr_busy;
      fr[out_FR] += cycles;
    }
}

/* Top up the latency of the given ACC by the given number of cycles.  */
void
update_ACC_latency (SIM_CPU *cpu, INT out_ACC, int cycles)
{
  if (out_ACC >= 0)
    {
      FRV_PROFILE_STATE *ps = CPU_PROFILE_STATE (cpu);
      int *acc = ps->acc_latency;
      if (acc[out_ACC] < cycles)
        acc[out_ACC] = cycles;
    }
}

/* Top up the latency of the given CCR by the given number of cycles.  */
void
update_CCR_latency (SIM_CPU *cpu, INT out_CCR, int cycles)
{
  if (out_CCR >= 0)
    {
      FRV_PROFILE_STATE *ps = CPU_PROFILE_STATE (cpu);
      int *ccr = ps->ccr_latency;
      if (ccr[out_CCR] < cycles)
        ccr[out_CCR] = cycles;
    }
}

/* Top up the latency of the given SPR by the given number of cycles.  */
void
update_SPR_latency (SIM_CPU *cpu, INT out_SPR, int cycles)
{
  if (out_SPR >= 0)
    {
      FRV_PROFILE_STATE *ps = CPU_PROFILE_STATE (cpu);
      int *spr = ps->spr_latency;
      if (spr[out_SPR] < cycles)
        spr[out_SPR] = cycles;
    }
}

/* Top up the latency of the given integer division resource by the given
   number of cycles.  */
void
update_idiv_resource_latency (SIM_CPU *cpu, INT in_resource, int cycles)
{
  /* Operate directly on the busy cycles since each resource can only
     be used once in a VLIW insn.  */
  FRV_PROFILE_STATE *ps = CPU_PROFILE_STATE (cpu);
  int *r = ps->idiv_busy;
  r[in_resource] = cycles;
}

/* Set the latency of the given resource to the given number of cycles.  */
void
update_fdiv_resource_latency (SIM_CPU *cpu, INT in_resource, int cycles)
{
  /* Operate directly on the busy cycles since each resource can only
     be used once in a VLIW insn.  */
  FRV_PROFILE_STATE *ps = CPU_PROFILE_STATE (cpu);
  int *r = ps->fdiv_busy;
  r[in_resource] = cycles;
}

/* Set the latency of the given resource to the given number of cycles.  */
void
update_fsqrt_resource_latency (SIM_CPU *cpu, INT in_resource, int cycles)
{
  /* Operate directly on the busy cycles since each resource can only
     be used once in a VLIW insn.  */
  FRV_PROFILE_STATE *ps = CPU_PROFILE_STATE (cpu);
  int *r = ps->fsqrt_busy;
  r[in_resource] = cycles;
}

/* Set the latency of the given resource to the given number of cycles.  */
void
update_float_resource_latency (SIM_CPU *cpu, INT in_resource, int cycles)
{
  /* Operate directly on the busy cycles since each resource can only
     be used once in a VLIW insn.  */
  FRV_PROFILE_STATE *ps = CPU_PROFILE_STATE (cpu);
  int *r = ps->float_busy;
  r[in_resource] = cycles;
}

void
update_media_resource_latency (SIM_CPU *cpu, INT in_resource, int cycles)
{
  /* Operate directly on the busy cycles since each resource can only
     be used once in a VLIW insn.  */
  FRV_PROFILE_STATE *ps = CPU_PROFILE_STATE (cpu);
  int *r = ps->media_busy;
  r[in_resource] = cycles;
}

/* Set the branch penalty to the given number of cycles.  */
void
update_branch_penalty (SIM_CPU *cpu, int cycles)
{
  /* Operate directly on the busy cycles since only one branch can occur
     in a VLIW insn.  */
  FRV_PROFILE_STATE *ps = CPU_PROFILE_STATE (cpu);
  ps->branch_penalty = cycles;
}

/* Check the availability of the given GR register and update the number
   of cycles the current VLIW insn must wait until it is available.  */
void
vliw_wait_for_GR (SIM_CPU *cpu, INT in_GR)
{
  FRV_PROFILE_STATE *ps = CPU_PROFILE_STATE (cpu);
  int *gr = ps->gr_busy;
  /* If the latency of the register is greater than the current wait
     then update the current wait.  */
  if (in_GR >= 0 && gr[in_GR] > ps->vliw_wait)
    {
      if (TRACE_INSN_P (cpu))
        sprintf (hazard_name, "Data hazard for gr%d:", in_GR);
      ps->vliw_wait = gr[in_GR];
    }
}

/* Check the availability of the given double GR register and update the
   number of cycles the current VLIW insn must wait until it is available.  */
void
vliw_wait_for_GRdouble (SIM_CPU *cpu, INT in_GR)
{
  FRV_PROFILE_STATE *ps = CPU_PROFILE_STATE (cpu);
  int *gr = ps->gr_busy;
  /* If the latency of the register is greater than the current wait
     then update the current wait.  */
  if (in_GR >= 0)
    {
      if (gr[in_GR] > ps->vliw_wait)
        {
          if (TRACE_INSN_P (cpu))
            sprintf (hazard_name, "Data hazard for gr%d:", in_GR);
          ps->vliw_wait = gr[in_GR];
        }
      if (in_GR < 63 && gr[in_GR + 1] > ps->vliw_wait)
        {
          if (TRACE_INSN_P (cpu))
            sprintf (hazard_name, "Data hazard for gr%d:", in_GR + 1);
          ps->vliw_wait = gr[in_GR + 1];
        }
    }
}

/* Check the availability of the given FR register and update the number
   of cycles the current VLIW insn must wait until it is available.  */
void
vliw_wait_for_FR (SIM_CPU *cpu, INT in_FR)
{
  FRV_PROFILE_STATE *ps = CPU_PROFILE_STATE (cpu);
  int *fr = ps->fr_busy;
  /* If the latency of the register is greater than the current wait
     then update the current wait.  */
  if (in_FR >= 0 && fr[in_FR] > ps->vliw_wait)
    {
      if (TRACE_INSN_P (cpu))
        sprintf (hazard_name, "Data hazard for fr%d:", in_FR);
      ps->vliw_wait = fr[in_FR];
    }
}

/* Check the availability of the given double FR register and update the
   number of cycles the current VLIW insn must wait until it is available.  */
void
vliw_wait_for_FRdouble (SIM_CPU *cpu, INT in_FR)
{
  FRV_PROFILE_STATE *ps = CPU_PROFILE_STATE (cpu);
  int *fr = ps->fr_busy;
  /* If the latency of the register is greater than the current wait
     then update the current wait.  */
  if (in_FR >= 0)
    {
      if (fr[in_FR] > ps->vliw_wait)
        {
          if (TRACE_INSN_P (cpu))
            sprintf (hazard_name, "Data hazard for fr%d:", in_FR);
          ps->vliw_wait = fr[in_FR];
        }
      if (in_FR < 63 && fr[in_FR + 1] > ps->vliw_wait)
        {
          if (TRACE_INSN_P (cpu))
            sprintf (hazard_name, "Data hazard for fr%d:", in_FR + 1);
          ps->vliw_wait = fr[in_FR + 1];
        }
    }
}

/* Check the availability of the given CCR register and update the number
   of cycles the current VLIW insn must wait until it is available.  */
void
vliw_wait_for_CCR (SIM_CPU *cpu, INT in_CCR)
{
  FRV_PROFILE_STATE *ps = CPU_PROFILE_STATE (cpu);
  int *ccr = ps->ccr_busy;
  /* If the latency of the register is greater than the current wait
     then update the current wait.  */
  if (in_CCR >= 0 && ccr[in_CCR] > ps->vliw_wait)
    {
      if (TRACE_INSN_P (cpu))
        {
          if (in_CCR > 3)
            sprintf (hazard_name, "Data hazard for icc%d:", in_CCR-4);
          else
            sprintf (hazard_name, "Data hazard for fcc%d:", in_CCR);
        }
      ps->vliw_wait = ccr[in_CCR];
    }
}

/* Check the availability of the given ACC register and update the number
   of cycles the current VLIW insn must wait until it is available.  */
void
vliw_wait_for_ACC (SIM_CPU *cpu, INT in_ACC)
{
  FRV_PROFILE_STATE *ps = CPU_PROFILE_STATE (cpu);
  int *acc = ps->acc_busy;
  /* If the latency of the register is greater than the current wait
     then update the current wait.  */
  if (in_ACC >= 0 && acc[in_ACC] > ps->vliw_wait)
    {
      if (TRACE_INSN_P (cpu))
        sprintf (hazard_name, "Data hazard for acc%d:", in_ACC);
      ps->vliw_wait = acc[in_ACC];
    }
}

/* Check the availability of the given SPR register and update the number
   of cycles the current VLIW insn must wait until it is available.  */
void
vliw_wait_for_SPR (SIM_CPU *cpu, INT in_SPR)
{
  FRV_PROFILE_STATE *ps = CPU_PROFILE_STATE (cpu);
  int *spr = ps->spr_busy;
  /* If the latency of the register is greater than the current wait
     then update the current wait.  */
  if (in_SPR >= 0 && spr[in_SPR] > ps->vliw_wait)
    {
      if (TRACE_INSN_P (cpu))
        sprintf (hazard_name, "Data hazard for spr %d:", in_SPR);
      ps->vliw_wait = spr[in_SPR];
    }
}

/* Check the availability of the given integer division resource and update
   the number of cycles the current VLIW insn must wait until it is
   available.  */
void
vliw_wait_for_idiv_resource (SIM_CPU *cpu, INT in_resource)
{
  FRV_PROFILE_STATE *ps = CPU_PROFILE_STATE (cpu);
  int *r = ps->idiv_busy;
  /* If the latency of the resource is greater than the current wait
     then update the current wait.  */
  if (r[in_resource] > ps->vliw_wait)
    {
      if (TRACE_INSN_P (cpu))
        {
          sprintf (hazard_name, "Resource hazard for integer division in slot I%d:", in_resource);
        }
      ps->vliw_wait = r[in_resource];
    }
}

/* Check the availability of the given float division resource and update
   the number of cycles the current VLIW insn must wait until it is
   available.  */
void
vliw_wait_for_fdiv_resource (SIM_CPU *cpu, INT in_resource)
{
  FRV_PROFILE_STATE *ps = CPU_PROFILE_STATE (cpu);
  int *r = ps->fdiv_busy;
  /* If the latency of the resource is greater than the current wait
     then update the current wait.  */
  if (r[in_resource] > ps->vliw_wait)
    {
      if (TRACE_INSN_P (cpu))
        {
          sprintf (hazard_name, "Resource hazard for floating point division in slot F%d:", in_resource);
        }
      ps->vliw_wait = r[in_resource];
    }
}

/* Check the availability of the given float square root resource and update
   the number of cycles the current VLIW insn must wait until it is
   available.  */
void
vliw_wait_for_fsqrt_resource (SIM_CPU *cpu, INT in_resource)
{
  FRV_PROFILE_STATE *ps = CPU_PROFILE_STATE (cpu);
  int *r = ps->fsqrt_busy;
  /* If the latency of the resource is greater than the current wait
     then update the current wait.  */
  if (r[in_resource] > ps->vliw_wait)
    {
      if (TRACE_INSN_P (cpu))
        {
          sprintf (hazard_name, "Resource hazard for square root in slot F%d:", in_resource);
        }
      ps->vliw_wait = r[in_resource];
    }
}

/* Check the availability of the given float unit resource and update
   the number of cycles the current VLIW insn must wait until it is
   available.  */
void
vliw_wait_for_float_resource (SIM_CPU *cpu, INT in_resource)
{
  FRV_PROFILE_STATE *ps = CPU_PROFILE_STATE (cpu);
  int *r = ps->float_busy;
  /* If the latency of the resource is greater than the current wait
     then update the current wait.  */
  if (r[in_resource] > ps->vliw_wait)
    {
      if (TRACE_INSN_P (cpu))
        {
          sprintf (hazard_name, "Resource hazard for floating point unit in slot F%d:", in_resource);
        }
      ps->vliw_wait = r[in_resource];
    }
}

/* Check the availability of the given media unit resource and update
   the number of cycles the current VLIW insn must wait until it is
   available.  */
void
vliw_wait_for_media_resource (SIM_CPU *cpu, INT in_resource)
{
  FRV_PROFILE_STATE *ps = CPU_PROFILE_STATE (cpu);
  int *r = ps->media_busy;
  /* If the latency of the resource is greater than the current wait
     then update the current wait.  */
  if (r[in_resource] > ps->vliw_wait)
    {
      if (TRACE_INSN_P (cpu))
        {
          sprintf (hazard_name, "Resource hazard for media unit in slot M%d:", in_resource);
        }
      ps->vliw_wait = r[in_resource];
    }
}

/* Run the caches until all requests for the given register(s) are satisfied.  */
void
load_wait_for_GR (SIM_CPU *cpu, INT in_GR)
{
  if (in_GR >= 0)
    {
      int wait = 0;
      while (load_pending_for_register (cpu, in_GR, 1/*words*/, REGTYPE_NONE))
        {
          frv_model_advance_cycles (cpu, 1);
          ++wait;
        }
      if (wait)
        {
          FRV_PROFILE_STATE *ps = CPU_PROFILE_STATE (cpu);
          ps->vliw_wait += wait;
          ps->vliw_load_stall += wait;
          if (TRACE_INSN_P (cpu))
            sprintf (hazard_name, "Data hazard for gr%d:", in_GR);
        }
    }
}

void
load_wait_for_FR (SIM_CPU *cpu, INT in_FR)
{
  if (in_FR >= 0)
    {
      FRV_PROFILE_STATE *ps = CPU_PROFILE_STATE (cpu);
      int *fr;
      int wait = 0;
      while (load_pending_for_register (cpu, in_FR, 1/*words*/, REGTYPE_FR))
        {
          frv_model_advance_cycles (cpu, 1);
          ++wait;
        }
      /* Post processing time may have been added to the register's
         latency after the loads were processed.  Account for that too.  */
      fr = ps->fr_busy;
      if (fr[in_FR])
        {
          wait += fr[in_FR];
          frv_model_advance_cycles (cpu, fr[in_FR]);
        }
      /* Update the vliw_wait with the number of cycles we waited for the
         load and any post-processing.  */
      if (wait)
        {
          ps->vliw_wait += wait;
          ps->vliw_load_stall += wait;
          if (TRACE_INSN_P (cpu))
            sprintf (hazard_name, "Data hazard for fr%d:", in_FR);
        }
    }
}

void
load_wait_for_GRdouble (SIM_CPU *cpu, INT in_GR)
{
  if (in_GR >= 0)
    {
      int wait = 0;
      while (load_pending_for_register (cpu, in_GR, 2/*words*/, REGTYPE_NONE))
        {
          frv_model_advance_cycles (cpu, 1);
          ++wait;
        }
      if (wait)
        {
          FRV_PROFILE_STATE *ps = CPU_PROFILE_STATE (cpu);
          ps->vliw_wait += wait;
          ps->vliw_load_stall += wait;
          if (TRACE_INSN_P (cpu))
            sprintf (hazard_name, "Data hazard for gr%d:", in_GR);
        }
    }
}

void
load_wait_for_FRdouble (SIM_CPU *cpu, INT in_FR)
{
  if (in_FR >= 0)
    {
      FRV_PROFILE_STATE *ps = CPU_PROFILE_STATE (cpu);
      int *fr;
      int wait = 0;
      while (load_pending_for_register (cpu, in_FR, 2/*words*/, REGTYPE_FR))
        {
          frv_model_advance_cycles (cpu, 1);
          ++wait;
        }
      /* Post processing time may have been added to the registers'
         latencies after the loads were processed.  Account for that too.  */
      fr = ps->fr_busy;
      if (fr[in_FR])
        {
          wait += fr[in_FR];
          frv_model_advance_cycles (cpu, fr[in_FR]);
        }
      if (in_FR < 63)
        {
          if (fr[in_FR + 1])
            {
              wait += fr[in_FR + 1];
              frv_model_advance_cycles (cpu, fr[in_FR + 1]);
            }
        }
      /* Update the vliw_wait with the number of cycles we waited for the
         load and any post-processing.  */
      if (wait)
        {
          ps->vliw_wait += wait;
          ps->vliw_load_stall += wait;
          if (TRACE_INSN_P (cpu))
            sprintf (hazard_name, "Data hazard for fr%d:", in_FR);
        }
    }
}

void
enforce_full_fr_latency (SIM_CPU *cpu, INT in_FR)
{
  FRV_PROFILE_STATE *ps = CPU_PROFILE_STATE (cpu);
  ps->fr_busy_adjust [in_FR] = -1;
}

/* Calculate how long the post processing for a floating point insn must
   wait for resources to become available.  */
int
post_wait_for_FR (SIM_CPU *cpu, INT in_FR)
{
  FRV_PROFILE_STATE *ps = CPU_PROFILE_STATE (cpu);
  int *fr = ps->fr_busy;

  if (in_FR >= 0 && fr[in_FR] > ps->post_wait)
    {
      ps->post_wait = fr[in_FR];
      if (TRACE_INSN_P (cpu))
        sprintf (hazard_name, "Data hazard for fr%d:", in_FR);
    }
  /* Return the updated wait so these functions, declared int, do not fall
     off their ends; callers may ignore the value.  */
  return ps->post_wait;
}

/* Calculate how long the post processing for a floating point insn must
   wait for resources to become available.  */
int
post_wait_for_FRdouble (SIM_CPU *cpu, INT in_FR)
{
  FRV_PROFILE_STATE *ps = CPU_PROFILE_STATE (cpu);
  int *fr = ps->fr_busy;

  if (in_FR >= 0)
    {
      if (fr[in_FR] > ps->post_wait)
        {
          ps->post_wait = fr[in_FR];
          if (TRACE_INSN_P (cpu))
            sprintf (hazard_name, "Data hazard for fr%d:", in_FR);
        }
      if (in_FR < 63 && fr[in_FR + 1] > ps->post_wait)
        {
          ps->post_wait = fr[in_FR + 1];
          if (TRACE_INSN_P (cpu))
            sprintf (hazard_name, "Data hazard for fr%d:", in_FR + 1);
        }
    }
  return ps->post_wait;
}

int
post_wait_for_ACC (SIM_CPU *cpu, INT in_ACC)
{
  FRV_PROFILE_STATE *ps = CPU_PROFILE_STATE (cpu);
  int *acc = ps->acc_busy;

  if (in_ACC >= 0 && acc[in_ACC] > ps->post_wait)
    {
      ps->post_wait = acc[in_ACC];
      if (TRACE_INSN_P (cpu))
        sprintf (hazard_name, "Data hazard for acc%d:", in_ACC);
    }
  return ps->post_wait;
}

int
post_wait_for_CCR (SIM_CPU *cpu, INT in_CCR)
{
  FRV_PROFILE_STATE *ps = CPU_PROFILE_STATE (cpu);
  int *ccr = ps->ccr_busy;

  if (in_CCR >= 0 && ccr[in_CCR] > ps->post_wait)
    {
      ps->post_wait = ccr[in_CCR];
      if (TRACE_INSN_P (cpu))
        {
          if (in_CCR > 3)
            sprintf (hazard_name, "Data hazard for icc%d:", in_CCR - 4);
          else
            sprintf (hazard_name, "Data hazard for fcc%d:", in_CCR);
        }
    }
  return ps->post_wait;
}

int
post_wait_for_SPR (SIM_CPU *cpu, INT in_SPR)
{
  FRV_PROFILE_STATE *ps = CPU_PROFILE_STATE (cpu);
  int *spr = ps->spr_busy;

  if (in_SPR >= 0 && spr[in_SPR] > ps->post_wait)
    {
      ps->post_wait = spr[in_SPR];
      if (TRACE_INSN_P (cpu))
        sprintf (hazard_name, "Data hazard for spr[%d]:", in_SPR);
    }
  return ps->post_wait;
}

int
post_wait_for_fdiv (SIM_CPU *cpu, INT slot)
{
  FRV_PROFILE_STATE *ps = CPU_PROFILE_STATE (cpu);
  int *fdiv = ps->fdiv_busy;

  /* Multiple floating point divisions in the same slot need only wait 1
     extra cycle.  */
  if (fdiv[slot] > 0 && 1 > ps->post_wait)
    {
      ps->post_wait = 1;
      if (TRACE_INSN_P (cpu))
        {
          sprintf (hazard_name, "Resource hazard for floating point division in slot F%d:", slot);
        }
    }
  return ps->post_wait;
}

int
post_wait_for_fsqrt (SIM_CPU *cpu, INT slot)
{
  FRV_PROFILE_STATE *ps = CPU_PROFILE_STATE (cpu);
  int *fsqrt = ps->fsqrt_busy;

  /* Multiple floating point square roots in the same slot need only wait 1
     extra cycle.  */
  if (fsqrt[slot] > 0 && 1 > ps->post_wait)
    {
      ps->post_wait = 1;
      if (TRACE_INSN_P (cpu))
        {
          sprintf (hazard_name, "Resource hazard for square root in slot F%d:", slot);
        }
    }
  return ps->post_wait;
}

int
post_wait_for_float (SIM_CPU *cpu, INT slot)
{
  FRV_PROFILE_STATE *ps = CPU_PROFILE_STATE (cpu);
  int *flt = ps->float_busy;

  /* The insn must wait until the float unit in this slot is available.  */
  if (flt[slot] > ps->post_wait)
    {
      ps->post_wait = flt[slot];
      if (TRACE_INSN_P (cpu))
        {
          sprintf (hazard_name, "Resource hazard for floating point unit in slot F%d:", slot);
        }
    }
  return ps->post_wait;
}

int
post_wait_for_media (SIM_CPU *cpu, INT slot)
{
  FRV_PROFILE_STATE *ps = CPU_PROFILE_STATE (cpu);
  int *media = ps->media_busy;

  /* The insn must wait until the media unit in this slot is available.  */
  if (media[slot] > ps->post_wait)
    {
      ps->post_wait = media[slot];
      if (TRACE_INSN_P (cpu))
        {
          sprintf (hazard_name, "Resource hazard for media unit in slot M%d:", slot);
        }
    }
  return ps->post_wait;
}

/* Print cpu-specific profile information.  */
#define COMMAS(n) sim_add_commas (comma_buf, sizeof (comma_buf), (n))

static void
print_cache (SIM_CPU *cpu, FRV_CACHE *cache, const char *cache_name)
{
  SIM_DESC sd = CPU_STATE (cpu);

  if (cache != NULL)
    {
      char comma_buf[20];
      unsigned accesses;

      sim_io_printf (sd, "  %s Cache\n\n", cache_name);
      accesses = cache->statistics.accesses;
      sim_io_printf (sd, "    Total accesses:  %s\n", COMMAS (accesses));
      if (accesses != 0)
        {
          float rate;
          unsigned hits = cache->statistics.hits;
          sim_io_printf (sd, "    Hits:            %s\n", COMMAS (hits));
          rate = (float)hits / accesses;
          sim_io_printf (sd, "    Hit rate:        %.2f%%\n", rate * 100);
        }
    }
  else
    sim_io_printf (sd, "  Model %s has no %s cache\n",
                   MODEL_NAME (CPU_MODEL (cpu)), cache_name);

  sim_io_printf (sd, "\n");
}

/* This table must correspond to the UNIT_ATTR table in
   opcodes/frv-desc.h.  Only the units up to UNIT_C need be
   listed since the others cannot occur after mapping.  */
static char *
slot_names[] =
{
  "none",
  "I0", "I1", "I01", "I2", "I3", "IALL",
  "FM0", "FM1", "FM01", "FM2", "FM3", "FMALL", "FMLOW",
  "B0", "B1", "B01",
  "C"
};

static void
print_parallel (SIM_CPU *cpu, int verbose)
{
  SIM_DESC sd = CPU_STATE (cpu);
  PROFILE_DATA *p = CPU_PROFILE_DATA (cpu);
  FRV_PROFILE_STATE *ps = CPU_PROFILE_STATE (cpu);
  unsigned total, vliw;
  char comma_buf[20];
  float average;

  sim_io_printf (sd, "Model %s Parallelization\n\n",
                 MODEL_NAME (CPU_MODEL (cpu)));

  total = PROFILE_TOTAL_INSN_COUNT (p);
  sim_io_printf (sd, "  Total instructions:           %s\n", COMMAS (total));
  vliw = ps->vliw_insns;
  sim_io_printf (sd, "  VLIW instructions:            %s\n", COMMAS (vliw));
  average = (float)total / vliw;
  sim_io_printf (sd, "  Average VLIW length:          %.2f\n", average);
  average = (float)PROFILE_MODEL_TOTAL_CYCLES (p) / vliw;
  sim_io_printf (sd, "  Cycles per VLIW instruction:  %.2f\n", average);
  average = (float)total / PROFILE_MODEL_TOTAL_CYCLES (p);
  sim_io_printf (sd, "  Instructions per cycle:       %.2f\n", average);

  if (verbose)
    {
      int i;
      int max_val = 0;
      int max_name_len = 0;
      for (i = UNIT_NIL + 1; i < UNIT_NUM_UNITS; ++i)
        {
          if (INSNS_IN_SLOT (i))
            {
              int len;
              if (INSNS_IN_SLOT (i) > max_val)
                max_val = INSNS_IN_SLOT (i);
              len = strlen (slot_names[i]);
              if (len > max_name_len)
                max_name_len = len;
            }
        }
      if (max_val > 0)
        {
          sim_io_printf (sd, "\n");
          sim_io_printf (sd, "  Instructions per slot:\n");
          sim_io_printf (sd, "\n");
          for (i = UNIT_NIL + 1; i < UNIT_NUM_UNITS; ++i)
            {
              if (INSNS_IN_SLOT (i) != 0)
                {
                  sim_io_printf (sd, "  %*s: %*s: ",
                                 max_name_len, slot_names[i],
                                 max_val < 10000 ? 5 : 10,
                                 COMMAS (INSNS_IN_SLOT (i)));
                  sim_profile_print_bar (sd, cpu, PROFILE_HISTOGRAM_WIDTH,
                                         INSNS_IN_SLOT (i),
                                         max_val);
                  sim_io_printf (sd, "\n");
                }
            }
        } /* details to print */
    } /* verbose */

  sim_io_printf (sd, "\n");
}

void
frv_profile_info (SIM_CPU *cpu, int verbose)
{
  /* FIXME: Need to add smp support.  */
  PROFILE_DATA *p = CPU_PROFILE_DATA (cpu);

#if WITH_PROFILE_PARALLEL_P
  if (PROFILE_FLAGS (p) [PROFILE_PARALLEL_IDX])
    print_parallel (cpu, verbose);
#endif

#if WITH_PROFILE_CACHE_P
  if (PROFILE_FLAGS (p) [PROFILE_CACHE_IDX])
    {
      SIM_DESC sd = CPU_STATE (cpu);
      sim_io_printf (sd, "Model %s Cache Statistics\n\n",
                     MODEL_NAME (CPU_MODEL (cpu)));
      print_cache (cpu, CPU_INSN_CACHE (cpu), "Instruction");
      print_cache (cpu, CPU_DATA_CACHE (cpu), "Data");
    }
#endif /* WITH_PROFILE_CACHE_P */
}

/* A hack to get registers referenced for profiling.  */
SI frv_ref_SI (SI ref) {return ref;}
#endif /* WITH_PROFILE_MODEL_P */