/* frv simulator support code
   Copyright (C) 1998, 1999, 2000, 2001, 2003, 2004, 2007
   Free Software Foundation, Inc.
   Contributed by Red Hat.

This file is part of the GNU simulators.

This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 2, or (at your option)
any later version.

This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
GNU General Public License for more details.

You should have received a copy of the GNU General Public License along
with this program; if not, write to the Free Software Foundation, Inc.,
59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.  */

#define WANT_CPU
#define WANT_CPU_FRVBF

#include "sim-main.h"
#include "cgen-mem.h"
#include "cgen-ops.h"
#include "cgen-engine.h"
#include "cgen-par.h"
#include "bfd.h"
#include "gdb/sim-frv.h"
#include <math.h>

/* Maintain a flag in order to know when to write the address of the next
   VLIW instruction into the LR register.  Used by JMPL, JMPIL, and CALL
   insns.  */
int frvbf_write_next_vliw_addr_to_LR;

/* The contents of BUF are in target byte order.  */
int
frvbf_fetch_register (SIM_CPU *current_cpu, int rn, unsigned char *buf, int len)
{
  if (SIM_FRV_GR0_REGNUM <= rn && rn <= SIM_FRV_GR63_REGNUM)
    {
      int hi_available, lo_available;
      int grn = rn - SIM_FRV_GR0_REGNUM;

      frv_gr_registers_available (current_cpu, &hi_available, &lo_available);

      if ((grn < 32 && !lo_available) || (grn >= 32 && !hi_available))
        return 0;
      else
        SETTSI (buf, GET_H_GR (grn));
    }
  else if (SIM_FRV_FR0_REGNUM <= rn && rn <= SIM_FRV_FR63_REGNUM)
    {
      int hi_available, lo_available;
      int frn = rn - SIM_FRV_FR0_REGNUM;

      frv_fr_registers_available (current_cpu, &hi_available, &lo_available);

      if ((frn < 32 && !lo_available) || (frn >= 32 && !hi_available))
        return 0;
      else
        SETTSI (buf, GET_H_FR (frn));
    }
  else if (rn == SIM_FRV_PC_REGNUM)
    SETTSI (buf, GET_H_PC ());
  else if (SIM_FRV_SPR0_REGNUM <= rn && rn <= SIM_FRV_SPR4095_REGNUM)
    {
      /* Make sure the register is implemented.  */
      FRV_REGISTER_CONTROL *control = CPU_REGISTER_CONTROL (current_cpu);
      int spr = rn - SIM_FRV_SPR0_REGNUM;
      if (! control->spr[spr].implemented)
        return 0;
      SETTSI (buf, GET_H_SPR (spr));
    }
  else
    {
      SETTSI (buf, 0xdeadbeef);
      return 0;
    }

  return len;
}
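
/* Both frvbf_fetch_register and frvbf_store_register below return LEN on
   success and 0 when the requested register is unavailable on the current
   machine or is not implemented, so a caller such as GDB can treat a zero
   return as "register not supplied".  */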

/* The contents of BUF are in target byte order.  */

int
frvbf_store_register (SIM_CPU *current_cpu, int rn, unsigned char *buf, int len)
{
  if (SIM_FRV_GR0_REGNUM <= rn && rn <= SIM_FRV_GR63_REGNUM)
    {
      int hi_available, lo_available;
      int grn = rn - SIM_FRV_GR0_REGNUM;

      frv_gr_registers_available (current_cpu, &hi_available, &lo_available);

      if ((grn < 32 && !lo_available) || (grn >= 32 && !hi_available))
        return 0;
      else
        SET_H_GR (grn, GETTSI (buf));
    }
  else if (SIM_FRV_FR0_REGNUM <= rn && rn <= SIM_FRV_FR63_REGNUM)
    {
      int hi_available, lo_available;
      int frn = rn - SIM_FRV_FR0_REGNUM;

      frv_fr_registers_available (current_cpu, &hi_available, &lo_available);

      if ((frn < 32 && !lo_available) || (frn >= 32 && !hi_available))
        return 0;
      else
        SET_H_FR (frn, GETTSI (buf));
    }
  else if (rn == SIM_FRV_PC_REGNUM)
    SET_H_PC (GETTSI (buf));
  else if (SIM_FRV_SPR0_REGNUM <= rn && rn <= SIM_FRV_SPR4095_REGNUM)
    {
      /* Make sure the register is implemented.  */
      FRV_REGISTER_CONTROL *control = CPU_REGISTER_CONTROL (current_cpu);
      int spr = rn - SIM_FRV_SPR0_REGNUM;
      if (! control->spr[spr].implemented)
        return 0;
      SET_H_SPR (spr, GETTSI (buf));
    }
  else
    return 0;

  return len;
}
\f
/* Cover fns to access the general registers.  */
USI
frvbf_h_gr_get_handler (SIM_CPU *current_cpu, UINT gr)
{
  frv_check_gr_access (current_cpu, gr);
  return CPU (h_gr[gr]);
}

void
frvbf_h_gr_set_handler (SIM_CPU *current_cpu, UINT gr, USI newval)
{
  frv_check_gr_access (current_cpu, gr);

  if (gr == 0)
    return; /* Storing into gr0 has no effect.  */

  CPU (h_gr[gr]) = newval;
}
\f
/* Cover fns to access the floating point registers.  */
SF
frvbf_h_fr_get_handler (SIM_CPU *current_cpu, UINT fr)
{
  frv_check_fr_access (current_cpu, fr);
  return CPU (h_fr[fr]);
}

void
frvbf_h_fr_set_handler (SIM_CPU *current_cpu, UINT fr, SF newval)
{
  frv_check_fr_access (current_cpu, fr);
  CPU (h_fr[fr]) = newval;
}
\f
/* Cover fns to access the general registers as double words.  */
static UINT
check_register_alignment (SIM_CPU *current_cpu, UINT reg, int align_mask)
{
  if (reg & align_mask)
    {
      SIM_DESC sd = CPU_STATE (current_cpu);
      switch (STATE_ARCHITECTURE (sd)->mach)
        {
          /* Note: there is a discrepancy between V2.2 of the FR400
             instruction manual and the various FR4xx LSI specs.
             The former claims that unaligned registers cause a
             register_exception while the latter say it's an
             illegal_instruction.  The LSI specs appear to be
             correct; in fact, the FR4xx series is not documented
             as having a register_exception.  */
        case bfd_mach_fr400:
        case bfd_mach_fr450:
        case bfd_mach_fr550:
          frv_queue_program_interrupt (current_cpu, FRV_ILLEGAL_INSTRUCTION);
          break;
        case bfd_mach_frvtomcat:
        case bfd_mach_fr500:
        case bfd_mach_frv:
          frv_queue_register_exception_interrupt (current_cpu,
                                                  FRV_REC_UNALIGNED);
          break;
        default:
          break;
        }

      reg &= ~align_mask;
    }

  return reg;
}

static UINT
check_fr_register_alignment (SIM_CPU *current_cpu, UINT reg, int align_mask)
{
  if (reg & align_mask)
    {
      SIM_DESC sd = CPU_STATE (current_cpu);
      switch (STATE_ARCHITECTURE (sd)->mach)
        {
          /* See comment in check_register_alignment().  */
        case bfd_mach_fr400:
        case bfd_mach_fr450:
        case bfd_mach_fr550:
          frv_queue_program_interrupt (current_cpu, FRV_ILLEGAL_INSTRUCTION);
          break;
        case bfd_mach_frvtomcat:
        case bfd_mach_fr500:
        case bfd_mach_frv:
          {
            struct frv_fp_exception_info fp_info = {
              FSR_NO_EXCEPTION, FTT_INVALID_FR
            };
            frv_queue_fp_exception_interrupt (current_cpu, & fp_info);
          }
          break;
        default:
          break;
        }

      reg &= ~align_mask;
    }

  return reg;
}

static UINT
check_memory_alignment (SIM_CPU *current_cpu, SI address, int align_mask)
{
  if (address & align_mask)
    {
      SIM_DESC sd = CPU_STATE (current_cpu);
      switch (STATE_ARCHITECTURE (sd)->mach)
        {
          /* See comment in check_register_alignment().  */
        case bfd_mach_fr400:
        case bfd_mach_fr450:
          frv_queue_data_access_error_interrupt (current_cpu, address);
          break;
        case bfd_mach_frvtomcat:
        case bfd_mach_fr500:
        case bfd_mach_frv:
          frv_queue_mem_address_not_aligned_interrupt (current_cpu, address);
          break;
        default:
          break;
        }

      address &= ~align_mask;
    }

  return address;
}
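
/* In all three checks above the simulator queues the appropriate interrupt
   but still clears the offending low-order bits and returns the masked
   register number or address, so execution continues with a well-defined
   operand even while an exception is pending.  */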

DI
frvbf_h_gr_double_get_handler (SIM_CPU *current_cpu, UINT gr)
{
  DI value;

  if (gr == 0)
    return 0; /* gr0 is always 0.  */

  /* Check the register alignment.  */
  gr = check_register_alignment (current_cpu, gr, 1);

  value = GET_H_GR (gr);
  value <<= 32;
  value |= (USI) GET_H_GR (gr + 1);
  return value;
}

void
frvbf_h_gr_double_set_handler (SIM_CPU *current_cpu, UINT gr, DI newval)
{
  if (gr == 0)
    return; /* Storing into gr0 has no effect.  */

  /* Check the register alignment.  */
  gr = check_register_alignment (current_cpu, gr, 1);

  SET_H_GR (gr    , (newval >> 32) & 0xffffffff);
  SET_H_GR (gr + 1, (newval      ) & 0xffffffff);
}
\f
/* Cover fns to access the floating point register as double words.  */
DF
frvbf_h_fr_double_get_handler (SIM_CPU *current_cpu, UINT fr)
{
  union {
    SF as_sf[2];
    DF as_df;
  } value;

  /* Check the register alignment.  */
  fr = check_fr_register_alignment (current_cpu, fr, 1);

  if (CURRENT_HOST_BYTE_ORDER == LITTLE_ENDIAN)
    {
      value.as_sf[1] = GET_H_FR (fr);
      value.as_sf[0] = GET_H_FR (fr + 1);
    }
  else
    {
      value.as_sf[0] = GET_H_FR (fr);
      value.as_sf[1] = GET_H_FR (fr + 1);
    }

  return value.as_df;
}

void
frvbf_h_fr_double_set_handler (SIM_CPU *current_cpu, UINT fr, DF newval)
{
  union {
    SF as_sf[2];
    DF as_df;
  } value;

  /* Check the register alignment.  */
  fr = check_fr_register_alignment (current_cpu, fr, 1);

  value.as_df = newval;
  if (CURRENT_HOST_BYTE_ORDER == LITTLE_ENDIAN)
    {
      SET_H_FR (fr    , value.as_sf[1]);
      SET_H_FR (fr + 1, value.as_sf[0]);
    }
  else
    {
      SET_H_FR (fr    , value.as_sf[0]);
      SET_H_FR (fr + 1, value.as_sf[1]);
    }
}
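
/* The unions above type-pun a host DF against two host SFs.  On a
   little-endian host the high-order word of the double sits in as_sf[1],
   hence the swapped assignments; either way the even FR register always
   receives the high-order word, matching the big-endian register-pair
   layout of the target.  */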
\f
/* Cover fns to access the floating point register as integer words.  */
USI
frvbf_h_fr_int_get_handler (SIM_CPU *current_cpu, UINT fr)
{
  union {
    SF  as_sf;
    USI as_usi;
  } value;

  value.as_sf = GET_H_FR (fr);
  return value.as_usi;
}

void
frvbf_h_fr_int_set_handler (SIM_CPU *current_cpu, UINT fr, USI newval)
{
  union {
    SF  as_sf;
    USI as_usi;
  } value;

  value.as_usi = newval;
  SET_H_FR (fr, value.as_sf);
}
\f
/* Cover fns to access the coprocessor registers as double words.  */
DI
frvbf_h_cpr_double_get_handler (SIM_CPU *current_cpu, UINT cpr)
{
  DI value;

  /* Check the register alignment.  */
  cpr = check_register_alignment (current_cpu, cpr, 1);

  value = GET_H_CPR (cpr);
  value <<= 32;
  value |= (USI) GET_H_CPR (cpr + 1);
  return value;
}

void
frvbf_h_cpr_double_set_handler (SIM_CPU *current_cpu, UINT cpr, DI newval)
{
  /* Check the register alignment.  */
  cpr = check_register_alignment (current_cpu, cpr, 1);

  SET_H_CPR (cpr    , (newval >> 32) & 0xffffffff);
  SET_H_CPR (cpr + 1, (newval      ) & 0xffffffff);
}
\f
/* Cover fns to write registers as quad words.  */
void
frvbf_h_gr_quad_set_handler (SIM_CPU *current_cpu, UINT gr, SI *newval)
{
  if (gr == 0)
    return; /* Storing into gr0 has no effect.  */

  /* Check the register alignment.  */
  gr = check_register_alignment (current_cpu, gr, 3);

  SET_H_GR (gr    , newval[0]);
  SET_H_GR (gr + 1, newval[1]);
  SET_H_GR (gr + 2, newval[2]);
  SET_H_GR (gr + 3, newval[3]);
}

void
frvbf_h_fr_quad_set_handler (SIM_CPU *current_cpu, UINT fr, SI *newval)
{
  /* Check the register alignment.  */
  fr = check_fr_register_alignment (current_cpu, fr, 3);

  SET_H_FR (fr    , newval[0]);
  SET_H_FR (fr + 1, newval[1]);
  SET_H_FR (fr + 2, newval[2]);
  SET_H_FR (fr + 3, newval[3]);
}

void
frvbf_h_cpr_quad_set_handler (SIM_CPU *current_cpu, UINT cpr, SI *newval)
{
  /* Check the register alignment.  */
  cpr = check_register_alignment (current_cpu, cpr, 3);

  SET_H_CPR (cpr    , newval[0]);
  SET_H_CPR (cpr + 1, newval[1]);
  SET_H_CPR (cpr + 2, newval[2]);
  SET_H_CPR (cpr + 3, newval[3]);
}
\f
/* Cover fns to access the special purpose registers.  */
USI
frvbf_h_spr_get_handler (SIM_CPU *current_cpu, UINT spr)
{
  /* Check access restrictions.  */
  frv_check_spr_read_access (current_cpu, spr);

  switch (spr)
    {
    case H_SPR_PSR:
      return spr_psr_get_handler (current_cpu);
    case H_SPR_TBR:
      return spr_tbr_get_handler (current_cpu);
    case H_SPR_BPSR:
      return spr_bpsr_get_handler (current_cpu);
    case H_SPR_CCR:
      return spr_ccr_get_handler (current_cpu);
    case H_SPR_CCCR:
      return spr_cccr_get_handler (current_cpu);
    case H_SPR_SR0:
    case H_SPR_SR1:
    case H_SPR_SR2:
    case H_SPR_SR3:
      return spr_sr_get_handler (current_cpu, spr);
    default:
      return CPU (h_spr[spr]);
    }
  return 0;
}

void
frvbf_h_spr_set_handler (SIM_CPU *current_cpu, UINT spr, USI newval)
{
  FRV_REGISTER_CONTROL *control;
  USI mask;
  USI oldval;

  /* Check access restrictions.  */
  frv_check_spr_write_access (current_cpu, spr);

  /* Only set those fields which are writeable.  */
  control = CPU_REGISTER_CONTROL (current_cpu);
  mask = control->spr[spr].read_only_mask;
  oldval = GET_H_SPR (spr);

  newval = (newval & ~mask) | (oldval & mask);

  /* Some registers are represented by individual components which are
     referenced more often than the register itself.  */
  switch (spr)
    {
    case H_SPR_PSR:
      spr_psr_set_handler (current_cpu, newval);
      break;
    case H_SPR_TBR:
      spr_tbr_set_handler (current_cpu, newval);
      break;
    case H_SPR_BPSR:
      spr_bpsr_set_handler (current_cpu, newval);
      break;
    case H_SPR_CCR:
      spr_ccr_set_handler (current_cpu, newval);
      break;
    case H_SPR_CCCR:
      spr_cccr_set_handler (current_cpu, newval);
      break;
    case H_SPR_SR0:
    case H_SPR_SR1:
    case H_SPR_SR2:
    case H_SPR_SR3:
      spr_sr_set_handler (current_cpu, spr, newval);
      break;
    case H_SPR_IHSR8:
      frv_cache_reconfigure (current_cpu, CPU_INSN_CACHE (current_cpu));
      break;
    default:
      CPU (h_spr[spr]) = newval;
      break;
    }
}
\f
/* Cover fns to access the gr_hi and gr_lo registers.  */
UHI
frvbf_h_gr_hi_get_handler (SIM_CPU *current_cpu, UINT gr)
{
  return (GET_H_GR (gr) >> 16) & 0xffff;
}

void
frvbf_h_gr_hi_set_handler (SIM_CPU *current_cpu, UINT gr, UHI newval)
{
  USI value = (GET_H_GR (gr) & 0xffff) | (newval << 16);
  SET_H_GR (gr, value);
}

UHI
frvbf_h_gr_lo_get_handler (SIM_CPU *current_cpu, UINT gr)
{
  return GET_H_GR (gr) & 0xffff;
}

void
frvbf_h_gr_lo_set_handler (SIM_CPU *current_cpu, UINT gr, UHI newval)
{
  USI value = (GET_H_GR (gr) & 0xffff0000) | (newval & 0xffff);
  SET_H_GR (gr, value);
}
\f
/* Cover fns to access the tbr bits.  */
USI
spr_tbr_get_handler (SIM_CPU *current_cpu)
{
  int tbr = ((GET_H_TBR_TBA () & 0xfffff) << 12) |
            ((GET_H_TBR_TT  () &    0xff) <<  4);

  return tbr;
}

void
spr_tbr_set_handler (SIM_CPU *current_cpu, USI newval)
{
  int tbr = newval;

  SET_H_TBR_TBA ((tbr >> 12) & 0xfffff);
  SET_H_TBR_TT  ((tbr >>  4) & 0xff);
}
\f
/* Cover fns to access the bpsr bits.  */
USI
spr_bpsr_get_handler (SIM_CPU *current_cpu)
{
  int bpsr = ((GET_H_BPSR_BS  () & 0x1) << 12) |
             ((GET_H_BPSR_BET () & 0x1)      );

  return bpsr;
}

void
spr_bpsr_set_handler (SIM_CPU *current_cpu, USI newval)
{
  int bpsr = newval;

  SET_H_BPSR_BS  ((bpsr >> 12) & 1);
  SET_H_BPSR_BET ((bpsr      ) & 1);
}
\f
/* Cover fns to access the psr bits.  */
USI
spr_psr_get_handler (SIM_CPU *current_cpu)
{
  int psr = ((GET_H_PSR_IMPLE () & 0xf) << 28) |
            ((GET_H_PSR_VER   () & 0xf) << 24) |
            ((GET_H_PSR_ICE   () & 0x1) << 16) |
            ((GET_H_PSR_NEM   () & 0x1) << 14) |
            ((GET_H_PSR_CM    () & 0x1) << 13) |
            ((GET_H_PSR_BE    () & 0x1) << 12) |
            ((GET_H_PSR_ESR   () & 0x1) << 11) |
            ((GET_H_PSR_EF    () & 0x1) <<  8) |
            ((GET_H_PSR_EM    () & 0x1) <<  7) |
            ((GET_H_PSR_PIL   () & 0xf) <<  3) |
            ((GET_H_PSR_S     () & 0x1) <<  2) |
            ((GET_H_PSR_PS    () & 0x1) <<  1) |
            ((GET_H_PSR_ET    () & 0x1)      );

  return psr;
}

void
spr_psr_set_handler (SIM_CPU *current_cpu, USI newval)
{
  /* The handler for PSR.S references the value of PSR.ESR, so set PSR.S
     first.  */
  SET_H_PSR_S ((newval >>  2) & 1);

  SET_H_PSR_IMPLE ((newval >> 28) & 0xf);
  SET_H_PSR_VER   ((newval >> 24) & 0xf);
  SET_H_PSR_ICE   ((newval >> 16) & 1);
  SET_H_PSR_NEM   ((newval >> 14) & 1);
  SET_H_PSR_CM    ((newval >> 13) & 1);
  SET_H_PSR_BE    ((newval >> 12) & 1);
  SET_H_PSR_ESR   ((newval >> 11) & 1);
  SET_H_PSR_EF    ((newval >>  8) & 1);
  SET_H_PSR_EM    ((newval >>  7) & 1);
  SET_H_PSR_PIL   ((newval >>  3) & 0xf);
  SET_H_PSR_PS    ((newval >>  1) & 1);
  SET_H_PSR_ET    ((newval      ) & 1);
}
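
/* For reference, the PSR bit layout implied by the shifts above:
   bits 31-28 IMPLE, 27-24 VER, 16 ICE, 14 NEM, 13 CM, 12 BE, 11 ESR,
   8 EF, 7 EM, 6-3 PIL, 2 S, 1 PS, 0 ET.  */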

void
frvbf_h_psr_s_set_handler (SIM_CPU *current_cpu, BI newval)
{
  /* If switching from user to supervisor mode, or vice-versa, then switch
     the supervisor/user context.  */
  int psr_s = GET_H_PSR_S ();
  if (psr_s != (newval & 1))
    {
      frvbf_switch_supervisor_user_context (current_cpu);
      CPU (h_psr_s) = newval & 1;
    }
}
\f
/* Cover fns to access the ccr bits.  */
USI
spr_ccr_get_handler (SIM_CPU *current_cpu)
{
  int ccr = ((GET_H_ICCR (H_ICCR_ICC3) & 0xf) << 28) |
            ((GET_H_ICCR (H_ICCR_ICC2) & 0xf) << 24) |
            ((GET_H_ICCR (H_ICCR_ICC1) & 0xf) << 20) |
            ((GET_H_ICCR (H_ICCR_ICC0) & 0xf) << 16) |
            ((GET_H_FCCR (H_FCCR_FCC3) & 0xf) << 12) |
            ((GET_H_FCCR (H_FCCR_FCC2) & 0xf) <<  8) |
            ((GET_H_FCCR (H_FCCR_FCC1) & 0xf) <<  4) |
            ((GET_H_FCCR (H_FCCR_FCC0) & 0xf)      );

  return ccr;
}

void
spr_ccr_set_handler (SIM_CPU *current_cpu, USI newval)
{
  int ccr = newval;

  SET_H_ICCR (H_ICCR_ICC3, (newval >> 28) & 0xf);
  SET_H_ICCR (H_ICCR_ICC2, (newval >> 24) & 0xf);
  SET_H_ICCR (H_ICCR_ICC1, (newval >> 20) & 0xf);
  SET_H_ICCR (H_ICCR_ICC0, (newval >> 16) & 0xf);
  SET_H_FCCR (H_FCCR_FCC3, (newval >> 12) & 0xf);
  SET_H_FCCR (H_FCCR_FCC2, (newval >>  8) & 0xf);
  SET_H_FCCR (H_FCCR_FCC1, (newval >>  4) & 0xf);
  SET_H_FCCR (H_FCCR_FCC0, (newval      ) & 0xf);
}
\f
QI
frvbf_set_icc_for_shift_right (
  SIM_CPU *current_cpu, SI value, SI shift, QI icc
)
{
  /* Set the C flag of the given icc to the logical OR of the bits shifted
     out.  */
  int mask = (1 << shift) - 1;
  if ((value & mask) != 0)
    return icc | 0x1;

  return icc & 0xe;
}

QI
frvbf_set_icc_for_shift_left (
  SIM_CPU *current_cpu, SI value, SI shift, QI icc
)
{
  /* Set the V flag of the given icc to the logical OR of the bits shifted
     out.  */
  int mask = ((1 << shift) - 1) << (32 - shift);
  if ((value & mask) != 0)
    return icc | 0x2;

  return icc & 0xd;
}
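
/* Example: shifting 0x00000005 right by 2 discards the nonzero bits 0b01
   (mask 0x3), so C is set; shifting 0x40000000 left by 2 discards a
   nonzero bit under mask 0xc0000000, so V is set.  */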
\f
/* Cover fns to access the cccr bits.  */
USI
spr_cccr_get_handler (SIM_CPU *current_cpu)
{
  int cccr = ((GET_H_CCCR (H_CCCR_CC7) & 0x3) << 14) |
             ((GET_H_CCCR (H_CCCR_CC6) & 0x3) << 12) |
             ((GET_H_CCCR (H_CCCR_CC5) & 0x3) << 10) |
             ((GET_H_CCCR (H_CCCR_CC4) & 0x3) <<  8) |
             ((GET_H_CCCR (H_CCCR_CC3) & 0x3) <<  6) |
             ((GET_H_CCCR (H_CCCR_CC2) & 0x3) <<  4) |
             ((GET_H_CCCR (H_CCCR_CC1) & 0x3) <<  2) |
             ((GET_H_CCCR (H_CCCR_CC0) & 0x3)      );

  return cccr;
}

void
spr_cccr_set_handler (SIM_CPU *current_cpu, USI newval)
{
  int cccr = newval;

  SET_H_CCCR (H_CCCR_CC7, (newval >> 14) & 0x3);
  SET_H_CCCR (H_CCCR_CC6, (newval >> 12) & 0x3);
  SET_H_CCCR (H_CCCR_CC5, (newval >> 10) & 0x3);
  SET_H_CCCR (H_CCCR_CC4, (newval >>  8) & 0x3);
  SET_H_CCCR (H_CCCR_CC3, (newval >>  6) & 0x3);
  SET_H_CCCR (H_CCCR_CC2, (newval >>  4) & 0x3);
  SET_H_CCCR (H_CCCR_CC1, (newval >>  2) & 0x3);
  SET_H_CCCR (H_CCCR_CC0, (newval      ) & 0x3);
}
\f
/* Cover fns to access the sr bits.  */
USI
spr_sr_get_handler (SIM_CPU *current_cpu, UINT spr)
{
  /* If PSR.ESR is not set, then SR0-3 map onto SGR4-7 which will be GR4-7,
     otherwise the correct mapping of USG4-7 or SGR4-7 will be in SR0-3.  */
  int psr_esr = GET_H_PSR_ESR ();
  if (! psr_esr)
    return GET_H_GR (4 + (spr - H_SPR_SR0));

  return CPU (h_spr[spr]);
}

void
spr_sr_set_handler (SIM_CPU *current_cpu, UINT spr, USI newval)
{
  /* If PSR.ESR is not set, then SR0-3 map onto SGR4-7 which will be GR4-7,
     otherwise the correct mapping of USG4-7 or SGR4-7 will be in SR0-3.  */
  int psr_esr = GET_H_PSR_ESR ();
  if (! psr_esr)
    SET_H_GR (4 + (spr - H_SPR_SR0), newval);
  else
    CPU (h_spr[spr]) = newval;
}
\f
/* Switch SR0-SR3 with GR4-GR7 if PSR.ESR is set.  */
void
frvbf_switch_supervisor_user_context (SIM_CPU *current_cpu)
{
  if (GET_H_PSR_ESR ())
    {
      /* We need to be in supervisor mode to swap the registers.  Access the
         PSR.S directly in order to avoid recursive context switches.  */
      int i;
      int save_psr_s = CPU (h_psr_s);
      CPU (h_psr_s) = 1;
      for (i = 0; i < 4; ++i)
        {
          int gr = i + 4;
          int spr = i + H_SPR_SR0;
          SI tmp = GET_H_SPR (spr);
          SET_H_SPR (spr, GET_H_GR (gr));
          SET_H_GR (gr, tmp);
        }
      CPU (h_psr_s) = save_psr_s;
    }
}
\f
/* Handle load/store of quad registers.  */
void
frvbf_load_quad_GR (SIM_CPU *current_cpu, PCADDR pc, SI address, SI targ_ix)
{
  int i;
  SI value[4];

  /* Check memory alignment */
  address = check_memory_alignment (current_cpu, address, 0xf);

  /* If we need to count cycles, then the cache operation will be
     initiated from the model profiling functions.
     See frvbf_model_....  */
  if (model_insn)
    {
      CPU_LOAD_ADDRESS (current_cpu) = address;
      CPU_LOAD_LENGTH (current_cpu) = 16;
    }
  else
    {
      for (i = 0; i < 4; ++i)
        {
          value[i] = frvbf_read_mem_SI (current_cpu, pc, address);
          address += 4;
        }
      sim_queue_fn_xi_write (current_cpu, frvbf_h_gr_quad_set_handler, targ_ix,
                             value);
    }
}

void
frvbf_store_quad_GR (SIM_CPU *current_cpu, PCADDR pc, SI address, SI src_ix)
{
  int i;
  SI value[4];
  USI hsr0;

  /* Check register and memory alignment.  */
  src_ix = check_register_alignment (current_cpu, src_ix, 3);
  address = check_memory_alignment (current_cpu, address, 0xf);

  for (i = 0; i < 4; ++i)
    {
      /* GR0 is always 0.  */
      if (src_ix == 0)
        value[i] = 0;
      else
        value[i] = GET_H_GR (src_ix + i);
    }
  hsr0 = GET_HSR0 ();
  if (GET_HSR0_DCE (hsr0))
    sim_queue_fn_mem_xi_write (current_cpu, frvbf_mem_set_XI, address, value);
  else
    sim_queue_mem_xi_write (current_cpu, address, value);
}

void
frvbf_load_quad_FRint (SIM_CPU *current_cpu, PCADDR pc, SI address, SI targ_ix)
{
  int i;
  SI value[4];

  /* Check memory alignment */
  address = check_memory_alignment (current_cpu, address, 0xf);

  /* If we need to count cycles, then the cache operation will be
     initiated from the model profiling functions.
     See frvbf_model_....  */
  if (model_insn)
    {
      CPU_LOAD_ADDRESS (current_cpu) = address;
      CPU_LOAD_LENGTH (current_cpu) = 16;
    }
  else
    {
      for (i = 0; i < 4; ++i)
        {
          value[i] = frvbf_read_mem_SI (current_cpu, pc, address);
          address += 4;
        }
      sim_queue_fn_xi_write (current_cpu, frvbf_h_fr_quad_set_handler, targ_ix,
                             value);
    }
}

void
frvbf_store_quad_FRint (SIM_CPU *current_cpu, PCADDR pc, SI address, SI src_ix)
{
  int i;
  SI value[4];
  USI hsr0;

  /* Check register and memory alignment.  */
  src_ix = check_fr_register_alignment (current_cpu, src_ix, 3);
  address = check_memory_alignment (current_cpu, address, 0xf);

  for (i = 0; i < 4; ++i)
    value[i] = GET_H_FR (src_ix + i);

  hsr0 = GET_HSR0 ();
  if (GET_HSR0_DCE (hsr0))
    sim_queue_fn_mem_xi_write (current_cpu, frvbf_mem_set_XI, address, value);
  else
    sim_queue_mem_xi_write (current_cpu, address, value);
}

void
frvbf_load_quad_CPR (SIM_CPU *current_cpu, PCADDR pc, SI address, SI targ_ix)
{
  int i;
  SI value[4];

  /* Check memory alignment */
  address = check_memory_alignment (current_cpu, address, 0xf);

  /* If we need to count cycles, then the cache operation will be
     initiated from the model profiling functions.
     See frvbf_model_....  */
  if (model_insn)
    {
      CPU_LOAD_ADDRESS (current_cpu) = address;
      CPU_LOAD_LENGTH (current_cpu) = 16;
    }
  else
    {
      for (i = 0; i < 4; ++i)
        {
          value[i] = frvbf_read_mem_SI (current_cpu, pc, address);
          address += 4;
        }
      sim_queue_fn_xi_write (current_cpu, frvbf_h_cpr_quad_set_handler, targ_ix,
                             value);
    }
}

void
frvbf_store_quad_CPR (SIM_CPU *current_cpu, PCADDR pc, SI address, SI src_ix)
{
  int i;
  SI value[4];
  USI hsr0;

  /* Check register and memory alignment.  */
  src_ix = check_register_alignment (current_cpu, src_ix, 3);
  address = check_memory_alignment (current_cpu, address, 0xf);

  for (i = 0; i < 4; ++i)
    value[i] = GET_H_CPR (src_ix + i);

  hsr0 = GET_HSR0 ();
  if (GET_HSR0_DCE (hsr0))
    sim_queue_fn_mem_xi_write (current_cpu, frvbf_mem_set_XI, address, value);
  else
    sim_queue_mem_xi_write (current_cpu, address, value);
}
\f
void
frvbf_signed_integer_divide (
  SIM_CPU *current_cpu, SI arg1, SI arg2, int target_index, int non_excepting
)
{
  enum frv_dtt dtt = FRV_DTT_NO_EXCEPTION;
  if (arg1 == 0x80000000 && arg2 == -1)
    {
      /* 0x80000000/(-1) must result in 0x7fffffff when ISR.EDE is set;
         otherwise it may result in either 0x7fffffff (SPARC compatibility)
         or 0x80000000 (C language compatibility).  */
      USI isr;
      dtt = FRV_DTT_OVERFLOW;

      isr = GET_ISR ();
      if (GET_ISR_EDE (isr))
        sim_queue_fn_si_write (current_cpu, frvbf_h_gr_set, target_index,
                               0x7fffffff);
      else
        sim_queue_fn_si_write (current_cpu, frvbf_h_gr_set, target_index,
                               0x80000000);
      frvbf_force_update (current_cpu); /* Force update of target register.  */
    }
  else if (arg2 == 0)
    dtt = FRV_DTT_DIVISION_BY_ZERO;
  else
    sim_queue_fn_si_write (current_cpu, frvbf_h_gr_set, target_index,
                           arg1 / arg2);

  /* Check for exceptions.  */
  if (dtt != FRV_DTT_NO_EXCEPTION)
    dtt = frvbf_division_exception (current_cpu, dtt, target_index,
                                    non_excepting);
  if (non_excepting && dtt == FRV_DTT_NO_EXCEPTION)
    {
      /* Non excepting instruction.  Clear the NE flag for the target
         register.  */
      SI NE_flags[2];
      GET_NE_FLAGS (NE_flags, H_SPR_GNER0);
      CLEAR_NE_FLAG (NE_flags, target_index);
      SET_NE_FLAGS (H_SPR_GNER0, NE_flags);
    }
}

void
frvbf_unsigned_integer_divide (
  SIM_CPU *current_cpu, USI arg1, USI arg2, int target_index, int non_excepting
)
{
  if (arg2 == 0)
    frvbf_division_exception (current_cpu, FRV_DTT_DIVISION_BY_ZERO,
                              target_index, non_excepting);
  else
    {
      sim_queue_fn_si_write (current_cpu, frvbf_h_gr_set, target_index,
                             arg1 / arg2);
      if (non_excepting)
        {
          /* Non excepting instruction.  Clear the NE flag for the target
             register.  */
          SI NE_flags[2];
          GET_NE_FLAGS (NE_flags, H_SPR_GNER0);
          CLEAR_NE_FLAG (NE_flags, target_index);
          SET_NE_FLAGS (H_SPR_GNER0, NE_flags);
        }
    }
}
\f
/* Clear accumulators.  */
void
frvbf_clear_accumulators (SIM_CPU *current_cpu, SI acc_ix, int A)
{
  SIM_DESC sd = CPU_STATE (current_cpu);
  int acc_mask =
    (STATE_ARCHITECTURE (sd)->mach == bfd_mach_fr500) ? 7 :
    (STATE_ARCHITECTURE (sd)->mach == bfd_mach_fr550) ? 7 :
    (STATE_ARCHITECTURE (sd)->mach == bfd_mach_fr450) ? 11 :
    (STATE_ARCHITECTURE (sd)->mach == bfd_mach_fr400) ? 3 :
    63;
  FRV_PROFILE_STATE *ps = CPU_PROFILE_STATE (current_cpu);

  ps->mclracc_acc = acc_ix;
  ps->mclracc_A = A;
  if (A == 0 || acc_ix != 0) /* Clear 1 accumulator?  */
    {
      /* This instruction is a nop if the referenced accumulator is not
         implemented.  */
      if ((acc_ix & acc_mask) == acc_ix)
        sim_queue_fn_di_write (current_cpu, frvbf_h_acc40S_set, acc_ix, 0);
    }
  else
    {
      /* Clear all implemented accumulators.  */
      int i;
      for (i = 0; i <= acc_mask; ++i)
        if ((i & acc_mask) == i)
          sim_queue_fn_di_write (current_cpu, frvbf_h_acc40S_set, i, 0);
    }
}
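
/* The acc_mask values above encode which accumulators exist: ACC I is
   implemented when (I & acc_mask) == I.  Thus 3 gives ACC0-ACC3, 7 gives
   ACC0-ACC7, and the fr450's 11 (binary 1011) gives the split set
   ACC0-ACC3 plus ACC8-ACC11.  */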
\f
/* Functions to aid insn semantics.  */

/* Compute the result of the SCAN and SCANI insns after the shift and xor.  */
SI
frvbf_scan_result (SIM_CPU *current_cpu, SI value)
{
  SI i;
  SI mask;

  if (value == 0)
    return 63;

  /* Find the position of the first non-zero bit.
     The loop will terminate since there is guaranteed to be at least one
     non-zero bit.  */
  mask = 1 << (sizeof (mask) * 8 - 1);
  for (i = 0; (value & mask) == 0; ++i)
    value <<= 1;

  return i;
}
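
/* For a non-zero VALUE the loop above is simply a count of leading zero
   bits, yielding 0-31; 63 serves as the sentinel for an all-zero input.  */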

/* Compute the result of the cut insns.  */
SI
frvbf_cut (SIM_CPU *current_cpu, SI reg1, SI reg2, SI cut_point)
{
  SI result;
  if (cut_point < 32)
    {
      result = reg1 << cut_point;
      result |= (reg2 >> (32 - cut_point)) & ((1 << cut_point) - 1);
    }
  else
    result = reg2 << (cut_point - 32);

  return result;
}
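
/* The two registers are treated as a 64-bit value REG1:REG2 from which a
   32-bit window is cut.  For example, with reg1 0xaabbccdd, reg2
   0x11223344 and cut_point 8, the result is 0xbbccdd11: the low 24 bits
   of reg1 followed by the high 8 bits of reg2.  */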

/* Compute the result of the cut insns.  */
SI
frvbf_media_cut (SIM_CPU *current_cpu, DI acc, SI cut_point)
{
  /* The cut point is the lower 6 bits (signed) of what we are passed.  */
  cut_point = cut_point << 26 >> 26;

  /* The cut_point is relative to bit 40 of 64 bits.  */
  if (cut_point >= 0)
    return (acc << (cut_point + 24)) >> 32;

  /* Extend the sign bit (bit 40) for negative cuts.  */
  if (cut_point == -32)
    return (acc << 24) >> 63; /* Special case for full shiftout.  */

  return (acc << 24) >> (32 + -cut_point);
}
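
/* Worked example: with cut_point 0 the expression (acc << 24) >> 32
   yields bits 39..8 of the accumulator; each increment of cut_point
   slides that 32-bit window one bit toward the least significant end,
   and negative cuts slide it the other way with sign extension.  */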

/* Compute the result of the cut insns.  */
SI
frvbf_media_cut_ss (SIM_CPU *current_cpu, DI acc, SI cut_point)
{
  /* The cut point is the lower 6 bits (signed) of what we are passed.  */
  cut_point = cut_point << 26 >> 26;

  if (cut_point >= 0)
    {
      /* The cut_point is relative to bit 40 of 64 bits.  */
      DI shifted = acc << (cut_point + 24);
      DI unshifted = shifted >> (cut_point + 24);

      /* The result will be saturated if significant bits are shifted out.  */
      if (unshifted != acc)
        {
          if (acc < 0)
            return 0x80000000;
          return 0x7fffffff;
        }
    }

  /* The result will not be saturated, so use the code for the normal cut.  */
  return frvbf_media_cut (current_cpu, acc, cut_point);
}

/* Compute the result of int accumulator cut (SCUTSS).  */
SI
frvbf_iacc_cut (SIM_CPU *current_cpu, DI acc, SI cut_point)
{
  DI lower, upper;

  /* The cut point is the lower 7 bits (signed) of what we are passed.  */
  cut_point = cut_point << 25 >> 25;

  /* Conceptually, the operation is on a 128-bit sign-extension of ACC.
     The top bit of the return value corresponds to bit (63 - CUT_POINT)
     of this 128-bit value.

     Since we can't deal with 128-bit values very easily, convert the
     operation into an equivalent 64-bit one.  */
  if (cut_point < 0)
    {
      /* Avoid an undefined shift operation.  */
      if (cut_point == -64)
        acc >>= 63;
      else
        acc >>= -cut_point;
      cut_point = 0;
    }

  /* Get the shifted but unsaturated result.  Set LOWER to the lowest
     32 bits of the result and UPPER to the result >> 31.  */
  if (cut_point < 32)
    {
      /* The cut loses the (32 - CUT_POINT) least significant bits.
         Round the result up if the most significant of these lost bits
         is 1.  */
      lower = acc >> (32 - cut_point);
      if (lower < 0x7fffffff)
        if (acc & LSBIT64 (32 - cut_point - 1))
          lower++;
      upper = lower >> 31;
    }
  else
    {
      lower = acc << (cut_point - 32);
      upper = acc >> (63 - cut_point);
    }

  /* Saturate the result.  */
  if (upper < -1)
    return ~0x7fffffff;
  else if (upper > 0)
    return 0x7fffffff;
  else
    return lower;
}

/* Compute the result of shift-left-arithmetic-with-saturation (SLASS).  */
SI
frvbf_shift_left_arith_saturate (SIM_CPU *current_cpu, SI arg1, SI arg2)
{
  int neg_arg1;

  /* FIXME: what to do with negative shift amt?  */
  if (arg2 <= 0)
    return arg1;

  if (arg1 == 0)
    return 0;

  /* Signed shift by 31 or greater saturates by definition.  */
  if (arg2 >= 31)
    {
      if (arg1 > 0)
        return (SI) 0x7fffffff;
      else
        return (SI) 0x80000000;
    }

  /* OK, arg2 is between 1 and 31.  */
  neg_arg1 = (arg1 < 0);
  do {
    arg1 <<= 1;
    /* Check for sign bit change (saturation).  */
    if (neg_arg1 && (arg1 >= 0))
      return (SI) 0x80000000;
    else if (!neg_arg1 && (arg1 < 0))
      return (SI) 0x7fffffff;
  } while (--arg2 > 0);

  return arg1;
}
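
/* Example: shifting 0x20000000 left by 2 would produce 0x80000000, which
   flips the sign bit on the second step, so the loop above returns the
   saturated positive value 0x7fffffff instead.  */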

/* Simulate the media custom insns.  */
void
frvbf_media_cop (SIM_CPU *current_cpu, int cop_num)
{
  /* The semantics of the insn are a nop, since it is implementation defined.
     We do need to check whether it's implemented and set up for MTRAP
     if it's not.  */
  USI msr0 = GET_MSR (0);
  if (GET_MSR_EMCI (msr0) == 0)
    {
      /* no interrupt queued at this time.  */
      frv_set_mp_exception_registers (current_cpu, MTT_UNIMPLEMENTED_MPOP, 0);
    }
}

/* Simulate the media average (MAVEH) insn.  */
static HI
do_media_average (SIM_CPU *current_cpu, HI arg1, HI arg2)
{
  SIM_DESC sd = CPU_STATE (current_cpu);
  SI sum = (arg1 + arg2);
  HI result = sum >> 1;
  int rounding_value;

  /* On fr4xx and fr550, check the rounding mode.  On other machines
     rounding is always toward negative infinity and the result is
     already correctly rounded.  */
  switch (STATE_ARCHITECTURE (sd)->mach)
    {
      /* Need to check rounding mode.  */
    case bfd_mach_fr400:
    case bfd_mach_fr450:
    case bfd_mach_fr550:
      /* Check whether rounding will be required.  Rounding will be required
         if the sum is an odd number.  */
      rounding_value = sum & 1;
      if (rounding_value)
        {
          USI msr0 = GET_MSR (0);
          /* Check MSR0.SRDAV to determine which bits control the rounding.  */
          if (GET_MSR_SRDAV (msr0))
            {
              /* MSR0.RD controls rounding.  */
              switch (GET_MSR_RD (msr0))
                {
                case 0:
                  /* Round to nearest.  */
                  if (result >= 0)
                    ++result;
                  break;
                case 1:
                  /* Round toward 0.  */
                  if (result < 0)
                    ++result;
                  break;
                case 2:
                  /* Round toward positive infinity.  */
                  ++result;
                  break;
                case 3:
                  /* Round toward negative infinity.  The result is already
                     correctly rounded.  */
                  break;
                default:
                  abort ();
                  break;
                }
            }
          else
            {
              /* MSR0.RDAV controls rounding.  If set, round toward positive
                 infinity.  Otherwise the result is already rounded correctly
                 toward negative infinity.  */
              if (GET_MSR_RDAV (msr0))
                ++result;
            }
        }
      break;
    default:
      break;
    }

  return result;
}

SI
frvbf_media_average (SIM_CPU *current_cpu, SI reg1, SI reg2)
{
  SI result;
  result = do_media_average (current_cpu, reg1 & 0xffff, reg2 & 0xffff);
  result &= 0xffff;
  result |= do_media_average (current_cpu, (reg1 >> 16) & 0xffff,
                              (reg2 >> 16) & 0xffff) << 16;
  return result;
}

/* Maintain a flag in order to know when to write the address of the next
   VLIW instruction into the LR register.  Used by JMPL, JMPIL, and CALL.  */
void
frvbf_set_write_next_vliw_addr_to_LR (SIM_CPU *current_cpu, int value)
{
  frvbf_write_next_vliw_addr_to_LR = value;
}

void
frvbf_set_ne_index (SIM_CPU *current_cpu, int index)
{
  USI NE_flags[2];

  /* Save the target register so interrupt processing can set its NE flag
     in the event of an exception.  */
  frv_interrupt_state.ne_index = index;

  /* Clear the NE flag of the target register.  It will be reset if necessary
     in the event of an exception.  */
  GET_NE_FLAGS (NE_flags, H_SPR_FNER0);
  CLEAR_NE_FLAG (NE_flags, index);
  SET_NE_FLAGS (H_SPR_FNER0, NE_flags);
}

void
frvbf_force_update (SIM_CPU *current_cpu)
{
  CGEN_WRITE_QUEUE *q = CPU_WRITE_QUEUE (current_cpu);
  int ix = CGEN_WRITE_QUEUE_INDEX (q);
  if (ix > 0)
    {
      CGEN_WRITE_QUEUE_ELEMENT *item = CGEN_WRITE_QUEUE_ELEMENT (q, ix - 1);
      item->flags |= FRV_WRITE_QUEUE_FORCE_WRITE;
    }
}
\f
/* Condition code logic.  */
enum cr_ops {
  andcr, orcr, xorcr, nandcr, norcr, andncr, orncr, nandncr, norncr,
  num_cr_ops
};

enum cr_result {cr_undefined, cr_undefined1, cr_false, cr_true};

static enum cr_result
cr_logic[num_cr_ops][4][4] = {
  /* andcr */
  {
    /*                undefined     undefined     false         true      */
    /* undefined */  {cr_undefined, cr_undefined, cr_undefined, cr_undefined},
    /* undefined */  {cr_undefined, cr_undefined, cr_undefined, cr_undefined},
    /* false     */  {cr_undefined, cr_undefined, cr_undefined, cr_undefined},
    /* true      */  {cr_undefined, cr_undefined, cr_false,     cr_true     }
  },
  /* orcr */
  {
    /*                undefined     undefined     false         true      */
    /* undefined */  {cr_undefined, cr_undefined, cr_false,     cr_true     },
    /* undefined */  {cr_undefined, cr_undefined, cr_false,     cr_true     },
    /* false     */  {cr_false,     cr_false,     cr_false,     cr_true     },
    /* true      */  {cr_true,      cr_true,      cr_true,      cr_true     }
  },
  /* xorcr */
  {
    /*                undefined     undefined     false         true      */
    /* undefined */  {cr_undefined, cr_undefined, cr_undefined, cr_undefined},
    /* undefined */  {cr_undefined, cr_undefined, cr_undefined, cr_undefined},
    /* false     */  {cr_undefined, cr_undefined, cr_false,     cr_true     },
    /* true      */  {cr_true,      cr_true,      cr_true,      cr_false    }
  },
  /* nandcr */
  {
    /*                undefined     undefined     false         true      */
    /* undefined */  {cr_undefined, cr_undefined, cr_undefined, cr_undefined},
    /* undefined */  {cr_undefined, cr_undefined, cr_undefined, cr_undefined},
    /* false     */  {cr_undefined, cr_undefined, cr_undefined, cr_undefined},
    /* true      */  {cr_undefined, cr_undefined, cr_true,      cr_false    }
  },
  /* norcr */
  {
    /*                undefined     undefined     false         true      */
    /* undefined */  {cr_undefined, cr_undefined, cr_true,      cr_false    },
    /* undefined */  {cr_undefined, cr_undefined, cr_true,      cr_false    },
    /* false     */  {cr_true,      cr_true,      cr_true,      cr_false    },
    /* true      */  {cr_false,     cr_false,     cr_false,     cr_false    }
  },
  /* andncr */
  {
    /*                undefined     undefined     false         true      */
    /* undefined */  {cr_undefined, cr_undefined, cr_undefined, cr_undefined},
    /* undefined */  {cr_undefined, cr_undefined, cr_undefined, cr_undefined},
    /* false     */  {cr_undefined, cr_undefined, cr_false,     cr_true     },
    /* true      */  {cr_undefined, cr_undefined, cr_undefined, cr_undefined}
  },
  /* orncr */
  {
    /*                undefined     undefined     false         true      */
    /* undefined */  {cr_undefined, cr_undefined, cr_false,     cr_true     },
    /* undefined */  {cr_undefined, cr_undefined, cr_false,     cr_true     },
    /* false     */  {cr_true,      cr_true,      cr_true,      cr_true     },
    /* true      */  {cr_false,     cr_false,     cr_false,     cr_true     }
  },
  /* nandncr */
  {
    /*                undefined     undefined     false         true      */
    /* undefined */  {cr_undefined, cr_undefined, cr_undefined, cr_undefined},
    /* undefined */  {cr_undefined, cr_undefined, cr_undefined, cr_undefined},
    /* false     */  {cr_undefined, cr_undefined, cr_true,      cr_false    },
    /* true      */  {cr_undefined, cr_undefined, cr_undefined, cr_undefined}
  },
  /* norncr */
  {
    /*                undefined     undefined     false         true      */
    /* undefined */  {cr_undefined, cr_undefined, cr_true,      cr_false    },
    /* undefined */  {cr_undefined, cr_undefined, cr_true,      cr_false    },
    /* false     */  {cr_false,     cr_false,     cr_false,     cr_false    },
    /* true      */  {cr_true,      cr_true,      cr_true,      cr_false    }
  }
};

UQI
frvbf_cr_logic (SIM_CPU *current_cpu, SI operation, UQI arg1, UQI arg2)
{
  return cr_logic[operation][arg1][arg2];
}
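
/* The table indices follow enum cr_result: a CC value of 0 or 1 is
   undefined, 2 is false and 3 is true; operations on undefined inputs
   generally yield undefined, as the tables above encode.  */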
\f
/* Cache Manipulation.  */
void
frvbf_insn_cache_preload (SIM_CPU *current_cpu, SI address, USI length, int lock)
{
  /* If we need to count cycles, then the cache operation will be
     initiated from the model profiling functions.
     See frvbf_model_....  */
  int hsr0 = GET_HSR0 ();
  if (GET_HSR0_ICE (hsr0))
    {
      if (model_insn)
        {
          CPU_LOAD_ADDRESS (current_cpu) = address;
          CPU_LOAD_LENGTH (current_cpu) = length;
          CPU_LOAD_LOCK (current_cpu) = lock;
        }
      else
        {
          FRV_CACHE *cache = CPU_INSN_CACHE (current_cpu);
          frv_cache_preload (cache, address, length, lock);
        }
    }
}

void
frvbf_data_cache_preload (SIM_CPU *current_cpu, SI address, USI length, int lock)
{
  /* If we need to count cycles, then the cache operation will be
     initiated from the model profiling functions.
     See frvbf_model_....  */
  int hsr0 = GET_HSR0 ();
  if (GET_HSR0_DCE (hsr0))
    {
      if (model_insn)
        {
          CPU_LOAD_ADDRESS (current_cpu) = address;
          CPU_LOAD_LENGTH (current_cpu) = length;
          CPU_LOAD_LOCK (current_cpu) = lock;
        }
      else
        {
          FRV_CACHE *cache = CPU_DATA_CACHE (current_cpu);
          frv_cache_preload (cache, address, length, lock);
        }
    }
}

void
frvbf_insn_cache_unlock (SIM_CPU *current_cpu, SI address)
{
  /* If we need to count cycles, then the cache operation will be
     initiated from the model profiling functions.
     See frvbf_model_....  */
  int hsr0 = GET_HSR0 ();
  if (GET_HSR0_ICE (hsr0))
    {
      if (model_insn)
        CPU_LOAD_ADDRESS (current_cpu) = address;
      else
        {
          FRV_CACHE *cache = CPU_INSN_CACHE (current_cpu);
          frv_cache_unlock (cache, address);
        }
    }
}

void
frvbf_data_cache_unlock (SIM_CPU *current_cpu, SI address)
{
  /* If we need to count cycles, then the cache operation will be
     initiated from the model profiling functions.
     See frvbf_model_....  */
  int hsr0 = GET_HSR0 ();
  if (GET_HSR0_DCE (hsr0))
    {
      if (model_insn)
        CPU_LOAD_ADDRESS (current_cpu) = address;
      else
        {
          FRV_CACHE *cache = CPU_DATA_CACHE (current_cpu);
          frv_cache_unlock (cache, address);
        }
    }
}

void
frvbf_insn_cache_invalidate (SIM_CPU *current_cpu, SI address, int all)
{
  /* Make sure the insn was specified properly.  -1 will be passed for ALL
     for an icei with A=0.  */
  if (all == -1)
    {
      frv_queue_program_interrupt (current_cpu, FRV_ILLEGAL_INSTRUCTION);
      return;
    }

  /* If we need to count cycles, then the cache operation will be
     initiated from the model profiling functions.
     See frvbf_model_....  */
  if (model_insn)
    {
      /* Record the all-entries flag for use in profiling.  */
      FRV_PROFILE_STATE *ps = CPU_PROFILE_STATE (current_cpu);
      ps->all_cache_entries = all;
      CPU_LOAD_ADDRESS (current_cpu) = address;
    }
  else
    {
      FRV_CACHE *cache = CPU_INSN_CACHE (current_cpu);
      if (all)
        frv_cache_invalidate_all (cache, 0/* flush? */);
      else
        frv_cache_invalidate (cache, address, 0/* flush? */);
    }
}

void
frvbf_data_cache_invalidate (SIM_CPU *current_cpu, SI address, int all)
{
  /* Make sure the insn was specified properly.  -1 will be passed for ALL
     for a dcei with A=0.  */
  if (all == -1)
    {
      frv_queue_program_interrupt (current_cpu, FRV_ILLEGAL_INSTRUCTION);
      return;
    }

  /* If we need to count cycles, then the cache operation will be
     initiated from the model profiling functions.
     See frvbf_model_....  */
  if (model_insn)
    {
      /* Record the all-entries flag for use in profiling.  */
      FRV_PROFILE_STATE *ps = CPU_PROFILE_STATE (current_cpu);
      ps->all_cache_entries = all;
      CPU_LOAD_ADDRESS (current_cpu) = address;
    }
  else
    {
      FRV_CACHE *cache = CPU_DATA_CACHE (current_cpu);
      if (all)
        frv_cache_invalidate_all (cache, 0/* flush? */);
      else
        frv_cache_invalidate (cache, address, 0/* flush? */);
    }
}

void
frvbf_data_cache_flush (SIM_CPU *current_cpu, SI address, int all)
{
  /* Make sure the insn was specified properly.  -1 will be passed for ALL
     for a dcef with A=0.  */
  if (all == -1)
    {
      frv_queue_program_interrupt (current_cpu, FRV_ILLEGAL_INSTRUCTION);
      return;
    }

  /* If we need to count cycles, then the cache operation will be
     initiated from the model profiling functions.
     See frvbf_model_....  */
  if (model_insn)
    {
      /* Record the all-entries flag for use in profiling.  */
      FRV_PROFILE_STATE *ps = CPU_PROFILE_STATE (current_cpu);
      ps->all_cache_entries = all;
      CPU_LOAD_ADDRESS (current_cpu) = address;
    }
  else
    {
      FRV_CACHE *cache = CPU_DATA_CACHE (current_cpu);
      if (all)
        frv_cache_invalidate_all (cache, 1/* flush? */);
      else
        frv_cache_invalidate (cache, address, 1/* flush? */);
    }
}