/* frv simulator support code
   Copyright (C) 1998-2024 Free Software Foundation, Inc.
   Contributed by Red Hat.

   This file is part of the GNU simulators.

   This program is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License as published by
   the Free Software Foundation; either version 3 of the License, or
   (at your option) any later version.

   This program is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
   GNU General Public License for more details.

   You should have received a copy of the GNU General Public License
   along with this program.  If not, see <http://www.gnu.org/licenses/>.  */

/* This must come before any other includes.  */
#include "defs.h"

#define WANT_CPU
#define WANT_CPU_FRVBF

#include "sim-main.h"
#include "cgen-mem.h"
#include "cgen-ops.h"
#include "cgen-engine.h"
#include "cgen-par.h"
#include "bfd.h"
#include "sim/sim-frv.h"
#include <math.h>
#include <stdlib.h>

/* Maintain a flag in order to know when to write the address of the next
   VLIW instruction into the LR register.  Used by the JMPL, JMPIL, and CALL
   insns.  */
int frvbf_write_next_vliw_addr_to_LR;

/* The contents of BUF are in target byte order.  */
int
frvbf_fetch_register (SIM_CPU *current_cpu, int rn, void *buf, int len)
{
  if (SIM_FRV_GR0_REGNUM <= rn && rn <= SIM_FRV_GR63_REGNUM)
    {
      int hi_available, lo_available;
      int grn = rn - SIM_FRV_GR0_REGNUM;

      frv_gr_registers_available (current_cpu, &hi_available, &lo_available);

      if ((grn < 32 && !lo_available) || (grn >= 32 && !hi_available))
        return 0;
      else
        SETTSI (buf, GET_H_GR (grn));
    }
  else if (SIM_FRV_FR0_REGNUM <= rn && rn <= SIM_FRV_FR63_REGNUM)
    {
      int hi_available, lo_available;
      int frn = rn - SIM_FRV_FR0_REGNUM;

      frv_fr_registers_available (current_cpu, &hi_available, &lo_available);

      if ((frn < 32 && !lo_available) || (frn >= 32 && !hi_available))
        return 0;
      else
        SETTSI (buf, GET_H_FR (frn));
    }
  else if (rn == SIM_FRV_PC_REGNUM)
    SETTSI (buf, GET_H_PC ());
  else if (SIM_FRV_SPR0_REGNUM <= rn && rn <= SIM_FRV_SPR4095_REGNUM)
    {
      /* Make sure the register is implemented.  */
      FRV_REGISTER_CONTROL *control = CPU_REGISTER_CONTROL (current_cpu);
      int spr = rn - SIM_FRV_SPR0_REGNUM;
      if (! control->spr[spr].implemented)
        return 0;
      SETTSI (buf, GET_H_SPR (spr));
    }
  else
    {
      SETTSI (buf, 0xdeadbeef);
      return 0;
    }

  return len;
}

/* The contents of BUF are in target byte order.  */

int
frvbf_store_register (SIM_CPU *current_cpu, int rn, const void *buf, int len)
{
  if (SIM_FRV_GR0_REGNUM <= rn && rn <= SIM_FRV_GR63_REGNUM)
    {
      int hi_available, lo_available;
      int grn = rn - SIM_FRV_GR0_REGNUM;

      frv_gr_registers_available (current_cpu, &hi_available, &lo_available);

      if ((grn < 32 && !lo_available) || (grn >= 32 && !hi_available))
        return 0;
      else
        SET_H_GR (grn, GETTSI (buf));
    }
  else if (SIM_FRV_FR0_REGNUM <= rn && rn <= SIM_FRV_FR63_REGNUM)
    {
      int hi_available, lo_available;
      int frn = rn - SIM_FRV_FR0_REGNUM;

      frv_fr_registers_available (current_cpu, &hi_available, &lo_available);

      if ((frn < 32 && !lo_available) || (frn >= 32 && !hi_available))
        return 0;
      else
        SET_H_FR (frn, GETTSI (buf));
    }
  else if (rn == SIM_FRV_PC_REGNUM)
    SET_H_PC (GETTSI (buf));
  else if (SIM_FRV_SPR0_REGNUM <= rn && rn <= SIM_FRV_SPR4095_REGNUM)
    {
      /* Make sure the register is implemented.  */
      FRV_REGISTER_CONTROL *control = CPU_REGISTER_CONTROL (current_cpu);
      int spr = rn - SIM_FRV_SPR0_REGNUM;
      if (! control->spr[spr].implemented)
        return 0;
      SET_H_SPR (spr, GETTSI (buf));
    }
  else
    return 0;

  return len;
}
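
/* Illustration only, not part of the simulator proper: a caller reads a
   register through the interface above and converts the target-byte-order
   buffer back to host order with GETTSI:

     char buf[4];
     if (frvbf_fetch_register (current_cpu, SIM_FRV_PC_REGNUM, buf, 4) == 4)
       pc = GETTSI (buf);

   A return value of 0 means the register is unavailable or unimplemented
   on this variant, not that the register reads as 0.  */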
\f
/* Cover fns to access the general registers.  */
USI
frvbf_h_gr_get_handler (SIM_CPU *current_cpu, UINT gr)
{
  frv_check_gr_access (current_cpu, gr);
  return CPU (h_gr[gr]);
}

void
frvbf_h_gr_set_handler (SIM_CPU *current_cpu, UINT gr, USI newval)
{
  frv_check_gr_access (current_cpu, gr);

  if (gr == 0)
    return; /* Storing into gr0 has no effect.  */

  CPU (h_gr[gr]) = newval;
}
\f
/* Cover fns to access the floating point registers.  */
SF
frvbf_h_fr_get_handler (SIM_CPU *current_cpu, UINT fr)
{
  frv_check_fr_access (current_cpu, fr);
  return CPU (h_fr[fr]);
}

void
frvbf_h_fr_set_handler (SIM_CPU *current_cpu, UINT fr, SF newval)
{
  frv_check_fr_access (current_cpu, fr);
  CPU (h_fr[fr]) = newval;
}
\f
/* Cover fns to access the general registers as double words.  */
static UINT
check_register_alignment (SIM_CPU *current_cpu, UINT reg, int align_mask)
{
  if (reg & align_mask)
    {
      SIM_DESC sd = CPU_STATE (current_cpu);
      switch (STATE_ARCHITECTURE (sd)->mach)
        {
          /* Note: there is a discrepancy between V2.2 of the FR400
             instruction manual and the various FR4xx LSI specs.
             The former claims that unaligned registers cause a
             register_exception while the latter say it's an
             illegal_instruction.  The LSI specs appear to be
             correct; in fact, the FR4xx series is not documented
             as having a register_exception.  */
        case bfd_mach_fr400:
        case bfd_mach_fr450:
        case bfd_mach_fr550:
          frv_queue_program_interrupt (current_cpu, FRV_ILLEGAL_INSTRUCTION);
          break;
        case bfd_mach_frvtomcat:
        case bfd_mach_fr500:
        case bfd_mach_frv:
          frv_queue_register_exception_interrupt (current_cpu,
                                                  FRV_REC_UNALIGNED);
          break;
        default:
          break;
        }

      reg &= ~align_mask;
    }

  return reg;
}

static UINT
check_fr_register_alignment (SIM_CPU *current_cpu, UINT reg, int align_mask)
{
  if (reg & align_mask)
    {
      SIM_DESC sd = CPU_STATE (current_cpu);
      switch (STATE_ARCHITECTURE (sd)->mach)
        {
          /* See comment in check_register_alignment().  */
        case bfd_mach_fr400:
        case bfd_mach_fr450:
        case bfd_mach_fr550:
          frv_queue_program_interrupt (current_cpu, FRV_ILLEGAL_INSTRUCTION);
          break;
        case bfd_mach_frvtomcat:
        case bfd_mach_fr500:
        case bfd_mach_frv:
          {
            struct frv_fp_exception_info fp_info = {
              FSR_NO_EXCEPTION, FTT_INVALID_FR
            };
            frv_queue_fp_exception_interrupt (current_cpu, & fp_info);
          }
          break;
        default:
          break;
        }

      reg &= ~align_mask;
    }

  return reg;
}

static UINT
check_memory_alignment (SIM_CPU *current_cpu, SI address, int align_mask)
{
  if (address & align_mask)
    {
      SIM_DESC sd = CPU_STATE (current_cpu);
      switch (STATE_ARCHITECTURE (sd)->mach)
        {
          /* See comment in check_register_alignment().  */
        case bfd_mach_fr400:
        case bfd_mach_fr450:
          frv_queue_data_access_error_interrupt (current_cpu, address);
          break;
        case bfd_mach_frvtomcat:
        case bfd_mach_fr500:
        case bfd_mach_frv:
          frv_queue_mem_address_not_aligned_interrupt (current_cpu, address);
          break;
        default:
          break;
        }

      address &= ~align_mask;
    }

  return address;
}

DI
frvbf_h_gr_double_get_handler (SIM_CPU *current_cpu, UINT gr)
{
  DI value;

  if (gr == 0)
    return 0; /* gr0 is always 0.  */

  /* Check the register alignment.  */
  gr = check_register_alignment (current_cpu, gr, 1);

  value = GET_H_GR (gr);
  value <<= 32;
  value |= (USI) GET_H_GR (gr + 1);
  return value;
}

void
frvbf_h_gr_double_set_handler (SIM_CPU *current_cpu, UINT gr, DI newval)
{
  if (gr == 0)
    return; /* Storing into gr0 has no effect.  */

  /* Check the register alignment.  */
  gr = check_register_alignment (current_cpu, gr, 1);

  SET_H_GR (gr    , (newval >> 32) & 0xffffffff);
  SET_H_GR (gr + 1, (newval      ) & 0xffffffff);
}
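
/* For example: storing the DI value 0x0011223344556677 through
   frvbf_h_gr_double_set_handler with gr = 4 leaves GR4 = 0x00112233 and
   GR5 = 0x44556677.  An odd register number is a programming error: the
   alignment check queues the appropriate interrupt and clears the low
   bit, so gr = 5 is treated as the GR4/GR5 pair.  */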
\f
/* Cover fns to access the floating point register as double words.  */
DF
frvbf_h_fr_double_get_handler (SIM_CPU *current_cpu, UINT fr)
{
  union {
    SF as_sf[2];
    DF as_df;
  } value;

  /* Check the register alignment.  */
  fr = check_fr_register_alignment (current_cpu, fr, 1);

  if (HOST_BYTE_ORDER == BFD_ENDIAN_LITTLE)
    {
      value.as_sf[1] = GET_H_FR (fr);
      value.as_sf[0] = GET_H_FR (fr + 1);
    }
  else
    {
      value.as_sf[0] = GET_H_FR (fr);
      value.as_sf[1] = GET_H_FR (fr + 1);
    }

  return value.as_df;
}

void
frvbf_h_fr_double_set_handler (SIM_CPU *current_cpu, UINT fr, DF newval)
{
  union {
    SF as_sf[2];
    DF as_df;
  } value;

  /* Check the register alignment.  */
  fr = check_fr_register_alignment (current_cpu, fr, 1);

  value.as_df = newval;
  if (HOST_BYTE_ORDER == BFD_ENDIAN_LITTLE)
    {
      SET_H_FR (fr    , value.as_sf[1]);
      SET_H_FR (fr + 1, value.as_sf[0]);
    }
  else
    {
      SET_H_FR (fr    , value.as_sf[0]);
      SET_H_FR (fr + 1, value.as_sf[1]);
    }
}
\f
/* Cover fns to access the floating point register as integer words.  */
USI
frvbf_h_fr_int_get_handler (SIM_CPU *current_cpu, UINT fr)
{
  union {
    SF as_sf;
    USI as_usi;
  } value;

  value.as_sf = GET_H_FR (fr);
  return value.as_usi;
}

void
frvbf_h_fr_int_set_handler (SIM_CPU *current_cpu, UINT fr, USI newval)
{
  union {
    SF as_sf;
    USI as_usi;
  } value;

  value.as_usi = newval;
  SET_H_FR (fr, value.as_sf);
}
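
/* The unions above reinterpret the same 32 bits rather than convert the
   value.  For example, writing the integer 0x3f800000 through
   frvbf_h_fr_int_set_handler stores the IEEE single-precision pattern
   for 1.0f; reading it back through frvbf_h_fr_int_get_handler yields
   0x3f800000 again, not 1.  */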
\f
/* Cover fns to access the coprocessor registers as double words.  */
DI
frvbf_h_cpr_double_get_handler (SIM_CPU *current_cpu, UINT cpr)
{
  DI value;

  /* Check the register alignment.  */
  cpr = check_register_alignment (current_cpu, cpr, 1);

  value = GET_H_CPR (cpr);
  value <<= 32;
  value |= (USI) GET_H_CPR (cpr + 1);
  return value;
}

void
frvbf_h_cpr_double_set_handler (SIM_CPU *current_cpu, UINT cpr, DI newval)
{
  /* Check the register alignment.  */
  cpr = check_register_alignment (current_cpu, cpr, 1);

  SET_H_CPR (cpr    , (newval >> 32) & 0xffffffff);
  SET_H_CPR (cpr + 1, (newval      ) & 0xffffffff);
}
\f
/* Cover fns to write registers as quad words.  */
void
frvbf_h_gr_quad_set_handler (SIM_CPU *current_cpu, UINT gr, SI *newval)
{
  if (gr == 0)
    return; /* Storing into gr0 has no effect.  */

  /* Check the register alignment.  */
  gr = check_register_alignment (current_cpu, gr, 3);

  SET_H_GR (gr    , newval[0]);
  SET_H_GR (gr + 1, newval[1]);
  SET_H_GR (gr + 2, newval[2]);
  SET_H_GR (gr + 3, newval[3]);
}

void
frvbf_h_fr_quad_set_handler (SIM_CPU *current_cpu, UINT fr, SI *newval)
{
  /* Check the register alignment.  */
  fr = check_fr_register_alignment (current_cpu, fr, 3);

  SET_H_FR (fr    , newval[0]);
  SET_H_FR (fr + 1, newval[1]);
  SET_H_FR (fr + 2, newval[2]);
  SET_H_FR (fr + 3, newval[3]);
}

void
frvbf_h_cpr_quad_set_handler (SIM_CPU *current_cpu, UINT cpr, SI *newval)
{
  /* Check the register alignment.  */
  cpr = check_register_alignment (current_cpu, cpr, 3);

  SET_H_CPR (cpr    , newval[0]);
  SET_H_CPR (cpr + 1, newval[1]);
  SET_H_CPR (cpr + 2, newval[2]);
  SET_H_CPR (cpr + 3, newval[3]);
}
\f
/* Cover fns to access the special purpose registers.  */
USI
frvbf_h_spr_get_handler (SIM_CPU *current_cpu, UINT spr)
{
  /* Check access restrictions.  */
  frv_check_spr_read_access (current_cpu, spr);

  switch (spr)
    {
    case H_SPR_PSR:
      return spr_psr_get_handler (current_cpu);
    case H_SPR_TBR:
      return spr_tbr_get_handler (current_cpu);
    case H_SPR_BPSR:
      return spr_bpsr_get_handler (current_cpu);
    case H_SPR_CCR:
      return spr_ccr_get_handler (current_cpu);
    case H_SPR_CCCR:
      return spr_cccr_get_handler (current_cpu);
    case H_SPR_SR0:
    case H_SPR_SR1:
    case H_SPR_SR2:
    case H_SPR_SR3:
      return spr_sr_get_handler (current_cpu, spr);
    default:
      return CPU (h_spr[spr]);
    }
  return 0;
}

void
frvbf_h_spr_set_handler (SIM_CPU *current_cpu, UINT spr, USI newval)
{
  FRV_REGISTER_CONTROL *control;
  USI mask;
  USI oldval;

  /* Check access restrictions.  */
  frv_check_spr_write_access (current_cpu, spr);

  /* Only set those fields which are writeable.  */
  control = CPU_REGISTER_CONTROL (current_cpu);
  mask = control->spr[spr].read_only_mask;
  oldval = GET_H_SPR (spr);

  newval = (newval & ~mask) | (oldval & mask);

  /* Some registers are represented by individual components which are
     referenced more often than the register itself.  */
  switch (spr)
    {
    case H_SPR_PSR:
      spr_psr_set_handler (current_cpu, newval);
      break;
    case H_SPR_TBR:
      spr_tbr_set_handler (current_cpu, newval);
      break;
    case H_SPR_BPSR:
      spr_bpsr_set_handler (current_cpu, newval);
      break;
    case H_SPR_CCR:
      spr_ccr_set_handler (current_cpu, newval);
      break;
    case H_SPR_CCCR:
      spr_cccr_set_handler (current_cpu, newval);
      break;
    case H_SPR_SR0:
    case H_SPR_SR1:
    case H_SPR_SR2:
    case H_SPR_SR3:
      spr_sr_set_handler (current_cpu, spr, newval);
      break;
    case H_SPR_IHSR8:
      frv_cache_reconfigure (current_cpu, CPU_INSN_CACHE (current_cpu));
      break;
    default:
      CPU (h_spr[spr]) = newval;
      break;
    }
}
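
/* The read-only mask keeps protected bits intact.  For example, with
   read_only_mask = 0xffff0000, an old value of 0x12340000 and a
   requested write of 0xaaaabbbb, the expression
   (newval & ~mask) | (oldval & mask) stores 0x1234bbbb: the caller's
   low half is accepted and the read-only high half is preserved.  */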
\f
/* Cover fns to access the gr_hi and gr_lo registers.  */
UHI
frvbf_h_gr_hi_get_handler (SIM_CPU *current_cpu, UINT gr)
{
  return (GET_H_GR (gr) >> 16) & 0xffff;
}

void
frvbf_h_gr_hi_set_handler (SIM_CPU *current_cpu, UINT gr, UHI newval)
{
  USI value = (GET_H_GR (gr) & 0xffff) | (newval << 16);
  SET_H_GR (gr, value);
}

UHI
frvbf_h_gr_lo_get_handler (SIM_CPU *current_cpu, UINT gr)
{
  return GET_H_GR (gr) & 0xffff;
}

void
frvbf_h_gr_lo_set_handler (SIM_CPU *current_cpu, UINT gr, UHI newval)
{
  USI value = (GET_H_GR (gr) & 0xffff0000) | (newval & 0xffff);
  SET_H_GR (gr, value);
}
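
/* For example: with GR4 = 0x12345678, the hi handler reads 0x1234 and
   the lo handler reads 0x5678; setting the low half to 0xbeef yields
   GR4 = 0x1234beef while the high half is untouched.  */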
\f
/* Cover fns to access the tbr bits.  */
USI
spr_tbr_get_handler (SIM_CPU *current_cpu)
{
  int tbr = ((GET_H_TBR_TBA () & 0xfffff) << 12) |
            ((GET_H_TBR_TT  () &    0xff) <<  4);

  return tbr;
}

void
spr_tbr_set_handler (SIM_CPU *current_cpu, USI newval)
{
  int tbr = newval;

  SET_H_TBR_TBA ((tbr >> 12) & 0xfffff);
  SET_H_TBR_TT  ((tbr >>  4) & 0xff);
}
\f
/* Cover fns to access the bpsr bits.  */
USI
spr_bpsr_get_handler (SIM_CPU *current_cpu)
{
  int bpsr = ((GET_H_BPSR_BS  () & 0x1) << 12) |
             ((GET_H_BPSR_BET () & 0x1)      );

  return bpsr;
}

void
spr_bpsr_set_handler (SIM_CPU *current_cpu, USI newval)
{
  int bpsr = newval;

  SET_H_BPSR_BS  ((bpsr >> 12) & 1);
  SET_H_BPSR_BET ((bpsr      ) & 1);
}
\f
/* Cover fns to access the psr bits.  */
USI
spr_psr_get_handler (SIM_CPU *current_cpu)
{
  int psr = ((GET_H_PSR_IMPLE () & 0xf) << 28) |
            ((GET_H_PSR_VER   () & 0xf) << 24) |
            ((GET_H_PSR_ICE   () & 0x1) << 16) |
            ((GET_H_PSR_NEM   () & 0x1) << 14) |
            ((GET_H_PSR_CM    () & 0x1) << 13) |
            ((GET_H_PSR_BE    () & 0x1) << 12) |
            ((GET_H_PSR_ESR   () & 0x1) << 11) |
            ((GET_H_PSR_EF    () & 0x1) <<  8) |
            ((GET_H_PSR_EM    () & 0x1) <<  7) |
            ((GET_H_PSR_PIL   () & 0xf) <<  3) |
            ((GET_H_PSR_S     () & 0x1) <<  2) |
            ((GET_H_PSR_PS    () & 0x1) <<  1) |
            ((GET_H_PSR_ET    () & 0x1)      );

  return psr;
}

void
spr_psr_set_handler (SIM_CPU *current_cpu, USI newval)
{
  /* The handler for PSR.S references the value of PSR.ESR, so set PSR.S
     first.  */
  SET_H_PSR_S ((newval >>  2) & 1);

  SET_H_PSR_IMPLE ((newval >> 28) & 0xf);
  SET_H_PSR_VER   ((newval >> 24) & 0xf);
  SET_H_PSR_ICE   ((newval >> 16) & 1);
  SET_H_PSR_NEM   ((newval >> 14) & 1);
  SET_H_PSR_CM    ((newval >> 13) & 1);
  SET_H_PSR_BE    ((newval >> 12) & 1);
  SET_H_PSR_ESR   ((newval >> 11) & 1);
  SET_H_PSR_EF    ((newval >>  8) & 1);
  SET_H_PSR_EM    ((newval >>  7) & 1);
  SET_H_PSR_PIL   ((newval >>  3) & 0xf);
  SET_H_PSR_PS    ((newval >>  1) & 1);
  SET_H_PSR_ET    ((newval      ) & 1);
}
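
/* For example: a PSR image of 0x00000005 has bit 2 (S) and bit 0 (ET)
   set, i.e. supervisor mode with exceptions enabled, all other fields
   zero.  PSR.S is deliberately written before the other fields because
   frvbf_h_psr_s_set_handler below consults PSR.ESR when deciding
   whether to swap the supervisor/user register context.  */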

void
frvbf_h_psr_s_set_handler (SIM_CPU *current_cpu, BI newval)
{
  /* If switching from user to supervisor mode, or vice-versa, then switch
     the supervisor/user context.  */
  int psr_s = GET_H_PSR_S ();
  if (psr_s != (newval & 1))
    {
      frvbf_switch_supervisor_user_context (current_cpu);
      CPU (h_psr_s) = newval & 1;
    }
}
\f
/* Cover fns to access the ccr bits.  */
USI
spr_ccr_get_handler (SIM_CPU *current_cpu)
{
  int ccr = ((GET_H_ICCR (H_ICCR_ICC3) & 0xf) << 28) |
            ((GET_H_ICCR (H_ICCR_ICC2) & 0xf) << 24) |
            ((GET_H_ICCR (H_ICCR_ICC1) & 0xf) << 20) |
            ((GET_H_ICCR (H_ICCR_ICC0) & 0xf) << 16) |
            ((GET_H_FCCR (H_FCCR_FCC3) & 0xf) << 12) |
            ((GET_H_FCCR (H_FCCR_FCC2) & 0xf) <<  8) |
            ((GET_H_FCCR (H_FCCR_FCC1) & 0xf) <<  4) |
            ((GET_H_FCCR (H_FCCR_FCC0) & 0xf)      );

  return ccr;
}

void
spr_ccr_set_handler (SIM_CPU *current_cpu, USI newval)
{
  SET_H_ICCR (H_ICCR_ICC3, (newval >> 28) & 0xf);
  SET_H_ICCR (H_ICCR_ICC2, (newval >> 24) & 0xf);
  SET_H_ICCR (H_ICCR_ICC1, (newval >> 20) & 0xf);
  SET_H_ICCR (H_ICCR_ICC0, (newval >> 16) & 0xf);
  SET_H_FCCR (H_FCCR_FCC3, (newval >> 12) & 0xf);
  SET_H_FCCR (H_FCCR_FCC2, (newval >>  8) & 0xf);
  SET_H_FCCR (H_FCCR_FCC1, (newval >>  4) & 0xf);
  SET_H_FCCR (H_FCCR_FCC0, (newval      ) & 0xf);
}
\f
QI
frvbf_set_icc_for_shift_right (
  SIM_CPU *current_cpu, SI value, SI shift, QI icc
)
{
  /* Set the C flag of the given icc to the logical OR of the bits shifted
     out.  */
  int mask = (1 << shift) - 1;
  if ((value & mask) != 0)
    return icc | 0x1;

  return icc & 0xe;
}

QI
frvbf_set_icc_for_shift_left (
  SIM_CPU *current_cpu, SI value, SI shift, QI icc
)
{
  /* Set the V flag of the given icc to the logical OR of the bits shifted
     out.  */
  int mask = ((1 << shift) - 1) << (32 - shift);
  if ((value & mask) != 0)
    return icc | 0x2;

  return icc & 0xd;
}
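
/* Worked example: shifting 0x80000001 right by 1 discards bit 0 (set),
   so the C flag (bit 0 of the ICC) is set; shifting 0xc0000000 left by
   1 discards bit 31 (set), so the V flag (bit 1 of the ICC) is set.
   When no set bits are shifted out, the corresponding flag is cleared.  */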
\f
/* Cover fns to access the cccr bits.  */
USI
spr_cccr_get_handler (SIM_CPU *current_cpu)
{
  int cccr = ((GET_H_CCCR (H_CCCR_CC7) & 0x3) << 14) |
             ((GET_H_CCCR (H_CCCR_CC6) & 0x3) << 12) |
             ((GET_H_CCCR (H_CCCR_CC5) & 0x3) << 10) |
             ((GET_H_CCCR (H_CCCR_CC4) & 0x3) <<  8) |
             ((GET_H_CCCR (H_CCCR_CC3) & 0x3) <<  6) |
             ((GET_H_CCCR (H_CCCR_CC2) & 0x3) <<  4) |
             ((GET_H_CCCR (H_CCCR_CC1) & 0x3) <<  2) |
             ((GET_H_CCCR (H_CCCR_CC0) & 0x3)      );

  return cccr;
}

void
spr_cccr_set_handler (SIM_CPU *current_cpu, USI newval)
{
  SET_H_CCCR (H_CCCR_CC7, (newval >> 14) & 0x3);
  SET_H_CCCR (H_CCCR_CC6, (newval >> 12) & 0x3);
  SET_H_CCCR (H_CCCR_CC5, (newval >> 10) & 0x3);
  SET_H_CCCR (H_CCCR_CC4, (newval >>  8) & 0x3);
  SET_H_CCCR (H_CCCR_CC3, (newval >>  6) & 0x3);
  SET_H_CCCR (H_CCCR_CC2, (newval >>  4) & 0x3);
  SET_H_CCCR (H_CCCR_CC1, (newval >>  2) & 0x3);
  SET_H_CCCR (H_CCCR_CC0, (newval      ) & 0x3);
}
\f
/* Cover fns to access the sr bits.  */
USI
spr_sr_get_handler (SIM_CPU *current_cpu, UINT spr)
{
  /* If PSR.ESR is not set, then SR0-3 map onto SGR4-7 which will be GR4-7,
     otherwise the correct mapping of USG4-7 or SGR4-7 will be in SR0-3.  */
  int psr_esr = GET_H_PSR_ESR ();
  if (! psr_esr)
    return GET_H_GR (4 + (spr - H_SPR_SR0));

  return CPU (h_spr[spr]);
}

void
spr_sr_set_handler (SIM_CPU *current_cpu, UINT spr, USI newval)
{
  /* If PSR.ESR is not set, then SR0-3 map onto SGR4-7 which will be GR4-7,
     otherwise the correct mapping of USG4-7 or SGR4-7 will be in SR0-3.  */
  int psr_esr = GET_H_PSR_ESR ();
  if (! psr_esr)
    SET_H_GR (4 + (spr - H_SPR_SR0), newval);
  else
    CPU (h_spr[spr]) = newval;
}
\f
/* Switch SR0-SR3 with GR4-GR7 if PSR.ESR is set.  */
void
frvbf_switch_supervisor_user_context (SIM_CPU *current_cpu)
{
  if (GET_H_PSR_ESR ())
    {
      /* We need to be in supervisor mode to swap the registers.  Access the
         PSR.S directly in order to avoid recursive context switches.  */
      int i;
      int save_psr_s = CPU (h_psr_s);
      CPU (h_psr_s) = 1;
      for (i = 0; i < 4; ++i)
        {
          int gr = i + 4;
          int spr = i + H_SPR_SR0;
          SI tmp = GET_H_SPR (spr);
          SET_H_SPR (spr, GET_H_GR (gr));
          SET_H_GR (gr, tmp);
        }
      CPU (h_psr_s) = save_psr_s;
    }
}
\f
/* Handle load/store of quad registers.  */
void
frvbf_load_quad_GR (SIM_CPU *current_cpu, PCADDR pc, SI address, SI targ_ix)
{
  int i;
  SI value[4];

  /* Check memory alignment.  */
  address = check_memory_alignment (current_cpu, address, 0xf);

  /* If we need to count cycles, then the cache operation will be
     initiated from the model profiling functions.
     See frvbf_model_....  */
  if (model_insn)
    {
      CPU_LOAD_ADDRESS (current_cpu) = address;
      CPU_LOAD_LENGTH (current_cpu) = 16;
    }
  else
    {
      for (i = 0; i < 4; ++i)
        {
          value[i] = frvbf_read_mem_SI (current_cpu, pc, address);
          address += 4;
        }
      sim_queue_fn_xi_write (current_cpu, frvbf_h_gr_quad_set_handler, targ_ix,
                             value);
    }
}

void
frvbf_store_quad_GR (SIM_CPU *current_cpu, PCADDR pc, SI address, SI src_ix)
{
  int i;
  SI value[4];
  USI hsr0;

  /* Check register and memory alignment.  */
  src_ix = check_register_alignment (current_cpu, src_ix, 3);
  address = check_memory_alignment (current_cpu, address, 0xf);

  for (i = 0; i < 4; ++i)
    {
      /* GR0 is always 0.  */
      if (src_ix == 0)
        value[i] = 0;
      else
        value[i] = GET_H_GR (src_ix + i);
    }
  hsr0 = GET_HSR0 ();
  if (GET_HSR0_DCE (hsr0))
    sim_queue_fn_mem_xi_write (current_cpu, frvbf_mem_set_XI, address, value);
  else
    sim_queue_mem_xi_write (current_cpu, address, value);
}

void
frvbf_load_quad_FRint (SIM_CPU *current_cpu, PCADDR pc, SI address, SI targ_ix)
{
  int i;
  SI value[4];

  /* Check memory alignment.  */
  address = check_memory_alignment (current_cpu, address, 0xf);

  /* If we need to count cycles, then the cache operation will be
     initiated from the model profiling functions.
     See frvbf_model_....  */
  if (model_insn)
    {
      CPU_LOAD_ADDRESS (current_cpu) = address;
      CPU_LOAD_LENGTH (current_cpu) = 16;
    }
  else
    {
      for (i = 0; i < 4; ++i)
        {
          value[i] = frvbf_read_mem_SI (current_cpu, pc, address);
          address += 4;
        }
      sim_queue_fn_xi_write (current_cpu, frvbf_h_fr_quad_set_handler, targ_ix,
                             value);
    }
}

void
frvbf_store_quad_FRint (SIM_CPU *current_cpu, PCADDR pc, SI address, SI src_ix)
{
  int i;
  SI value[4];
  USI hsr0;

  /* Check register and memory alignment.  */
  src_ix = check_fr_register_alignment (current_cpu, src_ix, 3);
  address = check_memory_alignment (current_cpu, address, 0xf);

  for (i = 0; i < 4; ++i)
    value[i] = GET_H_FR (src_ix + i);

  hsr0 = GET_HSR0 ();
  if (GET_HSR0_DCE (hsr0))
    sim_queue_fn_mem_xi_write (current_cpu, frvbf_mem_set_XI, address, value);
  else
    sim_queue_mem_xi_write (current_cpu, address, value);
}

void
frvbf_load_quad_CPR (SIM_CPU *current_cpu, PCADDR pc, SI address, SI targ_ix)
{
  int i;
  SI value[4];

  /* Check memory alignment.  */
  address = check_memory_alignment (current_cpu, address, 0xf);

  /* If we need to count cycles, then the cache operation will be
     initiated from the model profiling functions.
     See frvbf_model_....  */
  if (model_insn)
    {
      CPU_LOAD_ADDRESS (current_cpu) = address;
      CPU_LOAD_LENGTH (current_cpu) = 16;
    }
  else
    {
      for (i = 0; i < 4; ++i)
        {
          value[i] = frvbf_read_mem_SI (current_cpu, pc, address);
          address += 4;
        }
      sim_queue_fn_xi_write (current_cpu, frvbf_h_cpr_quad_set_handler, targ_ix,
                             value);
    }
}

void
frvbf_store_quad_CPR (SIM_CPU *current_cpu, PCADDR pc, SI address, SI src_ix)
{
  int i;
  SI value[4];
  USI hsr0;

  /* Check register and memory alignment.  */
  src_ix = check_register_alignment (current_cpu, src_ix, 3);
  address = check_memory_alignment (current_cpu, address, 0xf);

  for (i = 0; i < 4; ++i)
    value[i] = GET_H_CPR (src_ix + i);

  hsr0 = GET_HSR0 ();
  if (GET_HSR0_DCE (hsr0))
    sim_queue_fn_mem_xi_write (current_cpu, frvbf_mem_set_XI, address, value);
  else
    sim_queue_mem_xi_write (current_cpu, address, value);
}
\f
void
frvbf_signed_integer_divide (
  SIM_CPU *current_cpu, SI arg1, SI arg2, int target_index, int non_excepting
)
{
  enum frv_dtt dtt = FRV_DTT_NO_EXCEPTION;
  if (arg1 == 0x80000000 && arg2 == -1)
    {
      /* 0x80000000/(-1) must result in 0x7fffffff when ISR.EDE is set;
         otherwise it may result in 0x7fffffff (sparc compatibility) or
         0x80000000 (C language compatibility).  */
      USI isr;
      dtt = FRV_DTT_OVERFLOW;

      isr = GET_ISR ();
      if (GET_ISR_EDE (isr))
        sim_queue_fn_si_write (current_cpu, frvbf_h_gr_set, target_index,
                               0x7fffffff);
      else
        sim_queue_fn_si_write (current_cpu, frvbf_h_gr_set, target_index,
                               0x80000000);
      frvbf_force_update (current_cpu); /* Force update of target register.  */
    }
  else if (arg2 == 0)
    dtt = FRV_DTT_DIVISION_BY_ZERO;
  else
    sim_queue_fn_si_write (current_cpu, frvbf_h_gr_set, target_index,
                           arg1 / arg2);

  /* Check for exceptions.  */
  if (dtt != FRV_DTT_NO_EXCEPTION)
    dtt = frvbf_division_exception (current_cpu, dtt, target_index,
                                    non_excepting);
  if (non_excepting && dtt == FRV_DTT_NO_EXCEPTION)
    {
      /* Non excepting instruction.  Clear the NE flag for the target
         register.  */
      SI NE_flags[2];
      GET_NE_FLAGS (NE_flags, H_SPR_GNER0);
      CLEAR_NE_FLAG (NE_flags, target_index);
      SET_NE_FLAGS (H_SPR_GNER0, NE_flags);
    }
}

void
frvbf_unsigned_integer_divide (
  SIM_CPU *current_cpu, USI arg1, USI arg2, int target_index, int non_excepting
)
{
  if (arg2 == 0)
    frvbf_division_exception (current_cpu, FRV_DTT_DIVISION_BY_ZERO,
                              target_index, non_excepting);
  else
    {
      sim_queue_fn_si_write (current_cpu, frvbf_h_gr_set, target_index,
                             arg1 / arg2);
      if (non_excepting)
        {
          /* Non excepting instruction.  Clear the NE flag for the target
             register.  */
          SI NE_flags[2];
          GET_NE_FLAGS (NE_flags, H_SPR_GNER0);
          CLEAR_NE_FLAG (NE_flags, target_index);
          SET_NE_FLAGS (H_SPR_GNER0, NE_flags);
        }
    }
}
\f
/* Clear accumulators.  */
void
frvbf_clear_accumulators (SIM_CPU *current_cpu, SI acc_ix, int A)
{
  SIM_DESC sd = CPU_STATE (current_cpu);
  int acc_mask =
    (STATE_ARCHITECTURE (sd)->mach == bfd_mach_fr500) ? 7 :
    (STATE_ARCHITECTURE (sd)->mach == bfd_mach_fr550) ? 7 :
    (STATE_ARCHITECTURE (sd)->mach == bfd_mach_fr450) ? 11 :
    (STATE_ARCHITECTURE (sd)->mach == bfd_mach_fr400) ? 3 :
    63;
  FRV_PROFILE_STATE *ps = CPU_PROFILE_STATE (current_cpu);

  ps->mclracc_acc = acc_ix;
  ps->mclracc_A = A;
  if (A == 0 || acc_ix != 0) /* Clear one accumulator?  */
    {
      /* This instruction is a nop if the referenced accumulator is not
         implemented.  */
      if ((acc_ix & acc_mask) == acc_ix)
        sim_queue_fn_di_write (current_cpu, frvbf_h_acc40S_set, acc_ix, 0);
    }
  else
    {
      /* Clear all implemented accumulators.  */
      int i;
      for (i = 0; i <= acc_mask; ++i)
        if ((i & acc_mask) == i)
          sim_queue_fn_di_write (current_cpu, frvbf_h_acc40S_set, i, 0);
    }
}
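
/* The acc_mask test selects exactly the implemented accumulator numbers.
   For example, the fr450 mask of 11 (binary 1011) accepts i in
   {0, 1, 2, 3, 8, 9, 10, 11}, since those are the values satisfying
   (i & 11) == i; the fr400 mask of 3 accepts only 0-3.  */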
\f
/* Functions to aid insn semantics.  */

/* Compute the result of the SCAN and SCANI insns after the shift and xor.  */
SI
frvbf_scan_result (SIM_CPU *current_cpu, SI value)
{
  SI i;
  SI mask;

  if (value == 0)
    return 63;

  /* Find the position of the first non-zero bit.
     The loop will terminate since there is guaranteed to be at least one
     non-zero bit.  */
  mask = 1 << (sizeof (mask) * 8 - 1);
  for (i = 0; (value & mask) == 0; ++i)
    value <<= 1;

  return i;
}
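
/* For example: an input of 0 returns 63 (no bit found), while
   0x40000000, whose highest set bit is bit 30, returns 1, the number
   of leading zero bits.  */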

/* Compute the result of the cut insns.  */
SI
frvbf_cut (SIM_CPU *current_cpu, SI reg1, SI reg2, SI cut_point)
{
  SI result;
  cut_point &= 0x3f;
  if (cut_point < 32)
    {
      result = reg1 << cut_point;
      result |= (reg2 >> (32 - cut_point)) & ((1 << cut_point) - 1);
    }
  else
    result = reg2 << (cut_point - 32);

  return result;
}
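
/* CUT extracts 32 bits starting cut_point bits into the 64-bit value
   formed by reg1:reg2.  For example, with reg1 = 0x11223344,
   reg2 = 0xaabbccdd and cut_point = 8, the result is 0x223344aa.  */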

/* Compute the result of the cut insns.  */
SI
frvbf_media_cut (SIM_CPU *current_cpu, DI acc, SI cut_point)
{
  /* The cut point is the lower 6 bits (signed) of what we are passed.  */
  cut_point = cut_point << 26 >> 26;

  /* The cut_point is relative to bit 40 of 64 bits.  */
  if (cut_point >= 0)
    return (acc << (cut_point + 24)) >> 32;

  /* Extend the sign bit (bit 40) for negative cuts.  */
  if (cut_point == -32)
    return (acc << 24) >> 63; /* Special case for full shiftout.  */

  return (acc << 24) >> (32 + -cut_point);
}
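
/* The shift pair (cut_point << 26 >> 26) sign-extends the low 6 bits:
   an input of 0x3f becomes -1 and 0x20 becomes -32, while 0x1f stays
   31.  This relies on arithmetic right shift of signed values, as the
   surrounding code already does.  */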

/* Compute the result of the cut insns.  */
SI
frvbf_media_cut_ss (SIM_CPU *current_cpu, DI acc, SI cut_point)
{
  /* The cut point is the lower 6 bits (signed) of what we are passed.  */
  cut_point = cut_point << 26 >> 26;

  if (cut_point >= 0)
    {
      /* The cut_point is relative to bit 40 of 64 bits.  */
      DI shifted = acc << (cut_point + 24);
      DI unshifted = shifted >> (cut_point + 24);

      /* The result will be saturated if significant bits are shifted out.  */
      if (unshifted != acc)
        {
          if (acc < 0)
            return 0x80000000;
          return 0x7fffffff;
        }
    }

  /* The result will not be saturated, so use the code for the normal cut.  */
  return frvbf_media_cut (current_cpu, acc, cut_point);
}

/* Compute the result of int accumulator cut (SCUTSS).  */
SI
frvbf_iacc_cut (SIM_CPU *current_cpu, DI acc, SI cut_point)
{
  DI lower, upper;

  /* The cut point is the lower 7 bits (signed) of what we are passed.  */
  cut_point = cut_point << 25 >> 25;

  /* Conceptually, the operation is on a 128-bit sign-extension of ACC.
     The top bit of the return value corresponds to bit (63 - CUT_POINT)
     of this 128-bit value.

     Since we can't deal with 128-bit values very easily, convert the
     operation into an equivalent 64-bit one.  */
  if (cut_point < 0)
    {
      /* Avoid an undefined shift operation.  */
      if (cut_point == -64)
        acc >>= 63;
      else
        acc >>= -cut_point;
      cut_point = 0;
    }

  /* Get the shifted but unsaturated result.  Set LOWER to the lowest
     32 bits of the result and UPPER to the result >> 31.  */
  if (cut_point < 32)
    {
      /* The cut loses the (32 - CUT_POINT) least significant bits.
         Round the result up if the most significant of these lost bits
         is 1.  */
      lower = acc >> (32 - cut_point);
      if (lower < 0x7fffffff)
        if (acc & LSBIT64 (32 - cut_point - 1))
          lower++;
      upper = lower >> 31;
    }
  else
    {
      lower = acc << (cut_point - 32);
      upper = acc >> (63 - cut_point);
    }

  /* Saturate the result.  */
  if (upper < -1)
    return ~0x7fffffff;
  else if (upper > 0)
    return 0x7fffffff;
  else
    return lower;
}

/* Compute the result of shift-left-arithmetic-with-saturation (SLASS).  */
SI
frvbf_shift_left_arith_saturate (SIM_CPU *current_cpu, SI arg1, SI arg2)
{
  int neg_arg1;

  /* FIXME: what to do with negative shift amt?  */
  if (arg2 <= 0)
    return arg1;

  if (arg1 == 0)
    return 0;

  /* Signed shift by 31 or greater saturates by definition.  */
  if (arg2 >= 31)
    {
      if (arg1 > 0)
        return (SI) 0x7fffffff;
      else
        return (SI) 0x80000000;
    }

  /* OK, arg2 is between 1 and 31.  */
  neg_arg1 = (arg1 < 0);
  do {
    arg1 <<= 1;
    /* Check for sign bit change (saturation).  */
    if (neg_arg1 && (arg1 >= 0))
      return (SI) 0x80000000;
    else if (!neg_arg1 && (arg1 < 0))
      return (SI) 0x7fffffff;
  } while (--arg2 > 0);

  return arg1;
}
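
/* For example: frvbf_shift_left_arith_saturate (cpu, 0x20000000, 2)
   saturates to 0x7fffffff, because the second shift would change the
   sign bit; a shift that never disturbs the sign bit, such as
   0x00000001 << 3, simply returns the shifted value (8).  */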

/* Simulate the media custom insns.  */
void
frvbf_media_cop (SIM_CPU *current_cpu, int cop_num)
{
  /* The semantics of the insn are a nop, since it is implementation defined.
     We do need to check whether it's implemented and set up for MTRAP
     if it's not.  */
  USI msr0 = GET_MSR (0);
  if (GET_MSR_EMCI (msr0) == 0)
    {
      /* no interrupt queued at this time.  */
      frv_set_mp_exception_registers (current_cpu, MTT_UNIMPLEMENTED_MPOP, 0);
    }
}

/* Simulate the media average (MAVEH) insn.  */
static HI
do_media_average (SIM_CPU *current_cpu, HI arg1, HI arg2)
{
  SIM_DESC sd = CPU_STATE (current_cpu);
  SI sum = (arg1 + arg2);
  HI result = sum >> 1;
  int rounding_value;

  /* On fr4xx and fr550, check the rounding mode.  On other machines
     rounding is always toward negative infinity and the result is
     already correctly rounded.  */
  switch (STATE_ARCHITECTURE (sd)->mach)
    {
      /* Need to check rounding mode.  */
    case bfd_mach_fr400:
    case bfd_mach_fr450:
    case bfd_mach_fr550:
      /* Check whether rounding will be required.  Rounding will be required
         if the sum is an odd number.  */
      rounding_value = sum & 1;
      if (rounding_value)
        {
          USI msr0 = GET_MSR (0);
          /* Check MSR0.SRDAV to determine which bits control the rounding.  */
          if (GET_MSR_SRDAV (msr0))
            {
              /* MSR0.RD controls rounding.  */
              switch (GET_MSR_RD (msr0))
                {
                case 0:
                  /* Round to nearest.  */
                  if (result >= 0)
                    ++result;
                  break;
                case 1:
                  /* Round toward 0.  */
                  if (result < 0)
                    ++result;
                  break;
                case 2:
                  /* Round toward positive infinity.  */
                  ++result;
                  break;
                case 3:
                  /* Round toward negative infinity.  The result is already
                     correctly rounded.  */
                  break;
                default:
                  abort ();
                  break;
                }
            }
          else
            {
              /* MSR0.RDAV controls rounding.  If set, round toward positive
                 infinity.  Otherwise the result is already rounded correctly
                 toward negative infinity.  */
              if (GET_MSR_RDAV (msr0))
                ++result;
            }
        }
      break;
    default:
      break;
    }

  return result;
}

SI
frvbf_media_average (SIM_CPU *current_cpu, SI reg1, SI reg2)
{
  SI result;
  result = do_media_average (current_cpu, reg1 & 0xffff, reg2 & 0xffff);
  result &= 0xffff;
  result |= do_media_average (current_cpu, (reg1 >> 16) & 0xffff,
                              (reg2 >> 16) & 0xffff) << 16;
  return result;
}
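
/* For example: averaging the halfwords 1 and 2 gives sum = 3 and a
   truncated result of 1 (rounding toward negative infinity).  On the
   fr4xx/fr550, with MSR0.SRDAV clear and MSR0.RDAV set, the odd sum is
   instead rounded toward positive infinity, producing 2.  */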

/* Maintain a flag in order to know when to write the address of the next
   VLIW instruction into the LR register.  Used by JMPL, JMPIL, and CALL.  */
void
frvbf_set_write_next_vliw_addr_to_LR (SIM_CPU *current_cpu, int value)
{
  frvbf_write_next_vliw_addr_to_LR = value;
}

void
frvbf_set_ne_index (SIM_CPU *current_cpu, int index)
{
  USI NE_flags[2];

  /* Save the target register so interrupt processing can set its NE flag
     in the event of an exception.  */
  frv_interrupt_state.ne_index = index;

  /* Clear the NE flag of the target register.  It will be reset if necessary
     in the event of an exception.  */
  GET_NE_FLAGS (NE_flags, H_SPR_FNER0);
  CLEAR_NE_FLAG (NE_flags, index);
  SET_NE_FLAGS (H_SPR_FNER0, NE_flags);
}

void
frvbf_force_update (SIM_CPU *current_cpu)
{
  CGEN_WRITE_QUEUE *q = CPU_WRITE_QUEUE (current_cpu);
  int ix = CGEN_WRITE_QUEUE_INDEX (q);
  if (ix > 0)
    {
      CGEN_WRITE_QUEUE_ELEMENT *item = CGEN_WRITE_QUEUE_ELEMENT (q, ix - 1);
      item->flags |= FRV_WRITE_QUEUE_FORCE_WRITE;
    }
}
\f
/* Condition code logic.  */
enum cr_ops {
  andcr, orcr, xorcr, nandcr, norcr, andncr, orncr, nandncr, norncr,
  num_cr_ops
};

enum cr_result {cr_undefined, cr_undefined1, cr_false, cr_true};

static enum cr_result
cr_logic[num_cr_ops][4][4] = {
  /* andcr */
  {
    /*              undefined     undefined     false         true         */
    /* undefined */ {cr_undefined, cr_undefined, cr_undefined, cr_undefined},
    /* undefined */ {cr_undefined, cr_undefined, cr_undefined, cr_undefined},
    /* false     */ {cr_undefined, cr_undefined, cr_undefined, cr_undefined},
    /* true      */ {cr_undefined, cr_undefined, cr_false,     cr_true     }
  },
  /* orcr */
  {
    /*              undefined     undefined     false         true         */
    /* undefined */ {cr_undefined, cr_undefined, cr_false,     cr_true     },
    /* undefined */ {cr_undefined, cr_undefined, cr_false,     cr_true     },
    /* false     */ {cr_false,     cr_false,     cr_false,     cr_true     },
    /* true      */ {cr_true,      cr_true,      cr_true,      cr_true     }
  },
  /* xorcr */
  {
    /*              undefined     undefined     false         true         */
    /* undefined */ {cr_undefined, cr_undefined, cr_undefined, cr_undefined},
    /* undefined */ {cr_undefined, cr_undefined, cr_undefined, cr_undefined},
    /* false     */ {cr_undefined, cr_undefined, cr_false,     cr_true     },
    /* true      */ {cr_true,      cr_true,      cr_true,      cr_false    }
  },
  /* nandcr */
  {
    /*              undefined     undefined     false         true         */
    /* undefined */ {cr_undefined, cr_undefined, cr_undefined, cr_undefined},
    /* undefined */ {cr_undefined, cr_undefined, cr_undefined, cr_undefined},
    /* false     */ {cr_undefined, cr_undefined, cr_undefined, cr_undefined},
    /* true      */ {cr_undefined, cr_undefined, cr_true,      cr_false    }
  },
  /* norcr */
  {
    /*              undefined     undefined     false         true         */
    /* undefined */ {cr_undefined, cr_undefined, cr_true,      cr_false    },
    /* undefined */ {cr_undefined, cr_undefined, cr_true,      cr_false    },
    /* false     */ {cr_true,      cr_true,      cr_true,      cr_false    },
    /* true      */ {cr_false,     cr_false,     cr_false,     cr_false    }
  },
  /* andncr */
  {
    /*              undefined     undefined     false         true         */
    /* undefined */ {cr_undefined, cr_undefined, cr_undefined, cr_undefined},
    /* undefined */ {cr_undefined, cr_undefined, cr_undefined, cr_undefined},
    /* false     */ {cr_undefined, cr_undefined, cr_false,     cr_true     },
    /* true      */ {cr_undefined, cr_undefined, cr_undefined, cr_undefined}
  },
  /* orncr */
  {
    /*              undefined     undefined     false         true         */
    /* undefined */ {cr_undefined, cr_undefined, cr_false,     cr_true     },
    /* undefined */ {cr_undefined, cr_undefined, cr_false,     cr_true     },
    /* false     */ {cr_true,      cr_true,      cr_true,      cr_true     },
    /* true      */ {cr_false,     cr_false,     cr_false,     cr_true     }
  },
  /* nandncr */
  {
    /*              undefined     undefined     false         true         */
    /* undefined */ {cr_undefined, cr_undefined, cr_undefined, cr_undefined},
    /* undefined */ {cr_undefined, cr_undefined, cr_undefined, cr_undefined},
    /* false     */ {cr_undefined, cr_undefined, cr_true,      cr_false    },
    /* true      */ {cr_undefined, cr_undefined, cr_undefined, cr_undefined}
  },
  /* norncr */
  {
    /*              undefined     undefined     false         true         */
    /* undefined */ {cr_undefined, cr_undefined, cr_true,      cr_false    },
    /* undefined */ {cr_undefined, cr_undefined, cr_true,      cr_false    },
    /* false     */ {cr_false,     cr_false,     cr_false,     cr_false    },
    /* true      */ {cr_true,      cr_true,      cr_true,      cr_false    }
  }
};

UQI
frvbf_cr_logic (SIM_CPU *current_cpu, SI operation, UQI arg1, UQI arg2)
{
  return cr_logic[operation][arg1][arg2];
}
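
/* Example lookup: cr_logic[orcr][cr_false][cr_true] is cr_true, while
   cr_logic[andcr][cr_false][cr_true] is cr_undefined, reflecting that
   ANDCR only produces a defined result when its first operand is true
   and its second operand is defined, per the tables above.  */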
\f
/* Cache Manipulation.  */
void
frvbf_insn_cache_preload (SIM_CPU *current_cpu, SI address, USI length, int lock)
{
  /* If we need to count cycles, then the cache operation will be
     initiated from the model profiling functions.
     See frvbf_model_....  */
  int hsr0 = GET_HSR0 ();
  if (GET_HSR0_ICE (hsr0))
    {
      if (model_insn)
        {
          CPU_LOAD_ADDRESS (current_cpu) = address;
          CPU_LOAD_LENGTH (current_cpu) = length;
          CPU_LOAD_LOCK (current_cpu) = lock;
        }
      else
        {
          FRV_CACHE *cache = CPU_INSN_CACHE (current_cpu);
          frv_cache_preload (cache, address, length, lock);
        }
    }
}

void
frvbf_data_cache_preload (SIM_CPU *current_cpu, SI address, USI length, int lock)
{
  /* If we need to count cycles, then the cache operation will be
     initiated from the model profiling functions.
     See frvbf_model_....  */
  int hsr0 = GET_HSR0 ();
  if (GET_HSR0_DCE (hsr0))
    {
      if (model_insn)
        {
          CPU_LOAD_ADDRESS (current_cpu) = address;
          CPU_LOAD_LENGTH (current_cpu) = length;
          CPU_LOAD_LOCK (current_cpu) = lock;
        }
      else
        {
          FRV_CACHE *cache = CPU_DATA_CACHE (current_cpu);
          frv_cache_preload (cache, address, length, lock);
        }
    }
}

void
frvbf_insn_cache_unlock (SIM_CPU *current_cpu, SI address)
{
  /* If we need to count cycles, then the cache operation will be
     initiated from the model profiling functions.
     See frvbf_model_....  */
  int hsr0 = GET_HSR0 ();
  if (GET_HSR0_ICE (hsr0))
    {
      if (model_insn)
        CPU_LOAD_ADDRESS (current_cpu) = address;
      else
        {
          FRV_CACHE *cache = CPU_INSN_CACHE (current_cpu);
          frv_cache_unlock (cache, address);
        }
    }
}

void
frvbf_data_cache_unlock (SIM_CPU *current_cpu, SI address)
{
  /* If we need to count cycles, then the cache operation will be
     initiated from the model profiling functions.
     See frvbf_model_....  */
  int hsr0 = GET_HSR0 ();
  if (GET_HSR0_DCE (hsr0))
    {
      if (model_insn)
        CPU_LOAD_ADDRESS (current_cpu) = address;
      else
        {
          FRV_CACHE *cache = CPU_DATA_CACHE (current_cpu);
          frv_cache_unlock (cache, address);
        }
    }
}

void
frvbf_insn_cache_invalidate (SIM_CPU *current_cpu, SI address, int all)
{
  /* Make sure the insn was specified properly.  -1 will be passed for ALL
     for an icei with A=0.  */
  if (all == -1)
    {
      frv_queue_program_interrupt (current_cpu, FRV_ILLEGAL_INSTRUCTION);
      return;
    }

  /* If we need to count cycles, then the cache operation will be
     initiated from the model profiling functions.
     See frvbf_model_....  */
  if (model_insn)
    {
      /* Record the all-entries flag for use in profiling.  */
      FRV_PROFILE_STATE *ps = CPU_PROFILE_STATE (current_cpu);
      ps->all_cache_entries = all;
      CPU_LOAD_ADDRESS (current_cpu) = address;
    }
  else
    {
      FRV_CACHE *cache = CPU_INSN_CACHE (current_cpu);
      if (all)
        frv_cache_invalidate_all (cache, 0/* flush? */);
      else
        frv_cache_invalidate (cache, address, 0/* flush? */);
    }
}

void
frvbf_data_cache_invalidate (SIM_CPU *current_cpu, SI address, int all)
{
  /* Make sure the insn was specified properly.  -1 will be passed for ALL
     for a dcei with A=0.  */
  if (all == -1)
    {
      frv_queue_program_interrupt (current_cpu, FRV_ILLEGAL_INSTRUCTION);
      return;
    }

  /* If we need to count cycles, then the cache operation will be
     initiated from the model profiling functions.
     See frvbf_model_....  */
  if (model_insn)
    {
      /* Record the all-entries flag for use in profiling.  */
      FRV_PROFILE_STATE *ps = CPU_PROFILE_STATE (current_cpu);
      ps->all_cache_entries = all;
      CPU_LOAD_ADDRESS (current_cpu) = address;
    }
  else
    {
      FRV_CACHE *cache = CPU_DATA_CACHE (current_cpu);
      if (all)
        frv_cache_invalidate_all (cache, 0/* flush? */);
      else
        frv_cache_invalidate (cache, address, 0/* flush? */);
    }
}

void
frvbf_data_cache_flush (SIM_CPU *current_cpu, SI address, int all)
{
  /* Make sure the insn was specified properly.  -1 will be passed for ALL
     for a dcef with A=0.  */
  if (all == -1)
    {
      frv_queue_program_interrupt (current_cpu, FRV_ILLEGAL_INSTRUCTION);
      return;
    }

  /* If we need to count cycles, then the cache operation will be
     initiated from the model profiling functions.
     See frvbf_model_....  */
  if (model_insn)
    {
      /* Record the all-entries flag for use in profiling.  */
      FRV_PROFILE_STATE *ps = CPU_PROFILE_STATE (current_cpu);
      ps->all_cache_entries = all;
      CPU_LOAD_ADDRESS (current_cpu) = address;
    }
  else
    {
      FRV_CACHE *cache = CPU_DATA_CACHE (current_cpu);
      if (all)
        frv_cache_invalidate_all (cache, 1/* flush? */);
      else
        frv_cache_invalidate (cache, address, 1/* flush? */);
    }
}