/* frv simulator support code
   Copyright (C) 1998-2022 Free Software Foundation, Inc.
   Contributed by Red Hat.

   This file is part of the GNU simulators.

   This program is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License as published by
   the Free Software Foundation; either version 3 of the License, or
   (at your option) any later version.

   This program is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
   GNU General Public License for more details.

   You should have received a copy of the GNU General Public License
   along with this program.  If not, see <http://www.gnu.org/licenses/>.  */

/* This must come before any other includes.  */
#include "defs.h"

#define WANT_CPU
#define WANT_CPU_FRVBF

#include "sim-main.h"
#include "cgen-mem.h"
#include "cgen-ops.h"
#include "cgen-engine.h"
#include "cgen-par.h"
#include "bfd.h"
#include "gdb/sim-frv.h"
#include <math.h>
#include <stdlib.h>
/* Maintain a flag in order to know when to write the address of the next
   VLIW instruction into the LR register.  Used by JMPL, JMPIL, and CALL
   insns.  */
int frvbf_write_next_vliw_addr_to_LR;

/* The contents of BUF are in target byte order.  */
int
frvbf_fetch_register (SIM_CPU *current_cpu, int rn, unsigned char *buf, int len)
{
  if (SIM_FRV_GR0_REGNUM <= rn && rn <= SIM_FRV_GR63_REGNUM)
    {
      int hi_available, lo_available;
      int grn = rn - SIM_FRV_GR0_REGNUM;

      frv_gr_registers_available (current_cpu, &hi_available, &lo_available);

      if ((grn < 32 && !lo_available) || (grn >= 32 && !hi_available))
        return 0;
      else
        SETTSI (buf, GET_H_GR (grn));
    }
  else if (SIM_FRV_FR0_REGNUM <= rn && rn <= SIM_FRV_FR63_REGNUM)
    {
      int hi_available, lo_available;
      int frn = rn - SIM_FRV_FR0_REGNUM;

      frv_fr_registers_available (current_cpu, &hi_available, &lo_available);

      if ((frn < 32 && !lo_available) || (frn >= 32 && !hi_available))
        return 0;
      else
        SETTSI (buf, GET_H_FR (frn));
    }
  else if (rn == SIM_FRV_PC_REGNUM)
    SETTSI (buf, GET_H_PC ());
  else if (SIM_FRV_SPR0_REGNUM <= rn && rn <= SIM_FRV_SPR4095_REGNUM)
    {
      /* Make sure the register is implemented.  */
      FRV_REGISTER_CONTROL *control = CPU_REGISTER_CONTROL (current_cpu);
      int spr = rn - SIM_FRV_SPR0_REGNUM;
      if (! control->spr[spr].implemented)
        return 0;
      SETTSI (buf, GET_H_SPR (spr));
    }
  else
    {
      SETTSI (buf, 0xdeadbeef);
      return 0;
    }

  return len;
}

/* The contents of BUF are in target byte order.  */

int
frvbf_store_register (SIM_CPU *current_cpu, int rn, const unsigned char *buf, int len)
{
  if (SIM_FRV_GR0_REGNUM <= rn && rn <= SIM_FRV_GR63_REGNUM)
    {
      int hi_available, lo_available;
      int grn = rn - SIM_FRV_GR0_REGNUM;

      frv_gr_registers_available (current_cpu, &hi_available, &lo_available);

      if ((grn < 32 && !lo_available) || (grn >= 32 && !hi_available))
        return 0;
      else
        SET_H_GR (grn, GETTSI (buf));
    }
  else if (SIM_FRV_FR0_REGNUM <= rn && rn <= SIM_FRV_FR63_REGNUM)
    {
      int hi_available, lo_available;
      int frn = rn - SIM_FRV_FR0_REGNUM;

      frv_fr_registers_available (current_cpu, &hi_available, &lo_available);

      if ((frn < 32 && !lo_available) || (frn >= 32 && !hi_available))
        return 0;
      else
        SET_H_FR (frn, GETTSI (buf));
    }
  else if (rn == SIM_FRV_PC_REGNUM)
    SET_H_PC (GETTSI (buf));
  else if (SIM_FRV_SPR0_REGNUM <= rn && rn <= SIM_FRV_SPR4095_REGNUM)
    {
      /* Make sure the register is implemented.  */
      FRV_REGISTER_CONTROL *control = CPU_REGISTER_CONTROL (current_cpu);
      int spr = rn - SIM_FRV_SPR0_REGNUM;
      if (! control->spr[spr].implemented)
        return 0;
      SET_H_SPR (spr, GETTSI (buf));
    }
  else
    return 0;

  return len;
}
\f
/* Cover fns to access the general registers.  */
USI
frvbf_h_gr_get_handler (SIM_CPU *current_cpu, UINT gr)
{
  frv_check_gr_access (current_cpu, gr);
  return CPU (h_gr[gr]);
}

void
frvbf_h_gr_set_handler (SIM_CPU *current_cpu, UINT gr, USI newval)
{
  frv_check_gr_access (current_cpu, gr);

  if (gr == 0)
    return; /* Storing into gr0 has no effect.  */

  CPU (h_gr[gr]) = newval;
}
\f
/* Cover fns to access the floating point registers.  */
SF
frvbf_h_fr_get_handler (SIM_CPU *current_cpu, UINT fr)
{
  frv_check_fr_access (current_cpu, fr);
  return CPU (h_fr[fr]);
}

void
frvbf_h_fr_set_handler (SIM_CPU *current_cpu, UINT fr, SF newval)
{
  frv_check_fr_access (current_cpu, fr);
  CPU (h_fr[fr]) = newval;
}
\f
/* Cover fns to access the general registers as double words.  */
static UINT
check_register_alignment (SIM_CPU *current_cpu, UINT reg, int align_mask)
{
  if (reg & align_mask)
    {
      SIM_DESC sd = CPU_STATE (current_cpu);
      switch (STATE_ARCHITECTURE (sd)->mach)
        {
        /* Note: there is a discrepancy between V2.2 of the FR400
           instruction manual and the various FR4xx LSI specs.
           The former claims that unaligned registers cause a
           register_exception while the latter say it's an
           illegal_instruction.  The LSI specs appear to be
           correct; in fact, the FR4xx series is not documented
           as having a register_exception.  */
        case bfd_mach_fr400:
        case bfd_mach_fr450:
        case bfd_mach_fr550:
          frv_queue_program_interrupt (current_cpu, FRV_ILLEGAL_INSTRUCTION);
          break;
        case bfd_mach_frvtomcat:
        case bfd_mach_fr500:
        case bfd_mach_frv:
          frv_queue_register_exception_interrupt (current_cpu,
                                                  FRV_REC_UNALIGNED);
          break;
        default:
          break;
        }

      reg &= ~align_mask;
    }

  return reg;
}
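
/* For illustration: with ALIGN_MASK == 1 (an even/odd register pair),
   a misaligned request such as reg == 5 queues the exception above and
   is then masked down to the aligned base register 4; with
   ALIGN_MASK == 3 (a quad), reg == 6 is masked down to 4.  */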

static UINT
check_fr_register_alignment (SIM_CPU *current_cpu, UINT reg, int align_mask)
{
  if (reg & align_mask)
    {
      SIM_DESC sd = CPU_STATE (current_cpu);
      switch (STATE_ARCHITECTURE (sd)->mach)
        {
        /* See comment in check_register_alignment().  */
        case bfd_mach_fr400:
        case bfd_mach_fr450:
        case bfd_mach_fr550:
          frv_queue_program_interrupt (current_cpu, FRV_ILLEGAL_INSTRUCTION);
          break;
        case bfd_mach_frvtomcat:
        case bfd_mach_fr500:
        case bfd_mach_frv:
          {
            struct frv_fp_exception_info fp_info = {
              FSR_NO_EXCEPTION, FTT_INVALID_FR
            };
            frv_queue_fp_exception_interrupt (current_cpu, & fp_info);
          }
          break;
        default:
          break;
        }

      reg &= ~align_mask;
    }

  return reg;
}

static UINT
check_memory_alignment (SIM_CPU *current_cpu, SI address, int align_mask)
{
  if (address & align_mask)
    {
      SIM_DESC sd = CPU_STATE (current_cpu);
      switch (STATE_ARCHITECTURE (sd)->mach)
        {
        /* See comment in check_register_alignment().  */
        case bfd_mach_fr400:
        case bfd_mach_fr450:
          frv_queue_data_access_error_interrupt (current_cpu, address);
          break;
        case bfd_mach_frvtomcat:
        case bfd_mach_fr500:
        case bfd_mach_frv:
          frv_queue_mem_address_not_aligned_interrupt (current_cpu, address);
          break;
        default:
          break;
        }

      address &= ~align_mask;
    }

  return address;
}

DI
frvbf_h_gr_double_get_handler (SIM_CPU *current_cpu, UINT gr)
{
  DI value;

  if (gr == 0)
    return 0; /* gr0 is always 0.  */

  /* Check the register alignment.  */
  gr = check_register_alignment (current_cpu, gr, 1);

  value = GET_H_GR (gr);
  value <<= 32;
  value |= (USI) GET_H_GR (gr + 1);
  return value;
}

void
frvbf_h_gr_double_set_handler (SIM_CPU *current_cpu, UINT gr, DI newval)
{
  if (gr == 0)
    return; /* Storing into gr0 has no effect.  */

  /* Check the register alignment.  */
  gr = check_register_alignment (current_cpu, gr, 1);

  SET_H_GR (gr    , (newval >> 32) & 0xffffffff);
  SET_H_GR (gr + 1, (newval      ) & 0xffffffff);
}
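
/* A worked example of the even/odd pair packing above: storing the DI
   value 0x0011223344556677 to gr4 writes 0x00112233 (the high word)
   into gr4 and 0x44556677 (the low word) into gr5; reading gr4 back
   as a double word reassembles the same 64-bit value.  */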
\f
/* Cover fns to access the floating point register as double words.  */
DF
frvbf_h_fr_double_get_handler (SIM_CPU *current_cpu, UINT fr)
{
  union {
    SF as_sf[2];
    DF as_df;
  } value;

  /* Check the register alignment.  */
  fr = check_fr_register_alignment (current_cpu, fr, 1);

  if (HOST_BYTE_ORDER == BFD_ENDIAN_LITTLE)
    {
      value.as_sf[1] = GET_H_FR (fr);
      value.as_sf[0] = GET_H_FR (fr + 1);
    }
  else
    {
      value.as_sf[0] = GET_H_FR (fr);
      value.as_sf[1] = GET_H_FR (fr + 1);
    }

  return value.as_df;
}

void
frvbf_h_fr_double_set_handler (SIM_CPU *current_cpu, UINT fr, DF newval)
{
  union {
    SF as_sf[2];
    DF as_df;
  } value;

  /* Check the register alignment.  */
  fr = check_fr_register_alignment (current_cpu, fr, 1);

  value.as_df = newval;
  if (HOST_BYTE_ORDER == BFD_ENDIAN_LITTLE)
    {
      SET_H_FR (fr    , value.as_sf[1]);
      SET_H_FR (fr + 1, value.as_sf[0]);
    }
  else
    {
      SET_H_FR (fr    , value.as_sf[0]);
      SET_H_FR (fr + 1, value.as_sf[1]);
    }
}
\f
/* Cover fns to access the floating point register as integer words.  */
USI
frvbf_h_fr_int_get_handler (SIM_CPU *current_cpu, UINT fr)
{
  union {
    SF as_sf;
    USI as_usi;
  } value;

  value.as_sf = GET_H_FR (fr);
  return value.as_usi;
}

void
frvbf_h_fr_int_set_handler (SIM_CPU *current_cpu, UINT fr, USI newval)
{
  union {
    SF as_sf;
    USI as_usi;
  } value;

  value.as_usi = newval;
  SET_H_FR (fr, value.as_sf);
}
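
/* The unions above reinterpret the register's bit pattern rather than
   convert its value: assuming IEEE-754 single precision for SF, the
   integer 0x3f800000 and the float 1.0f denote the same 32 bits, so
   writing one and reading back the other is a pure bit copy.  */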
\f
/* Cover fns to access the coprocessor registers as double words.  */
DI
frvbf_h_cpr_double_get_handler (SIM_CPU *current_cpu, UINT cpr)
{
  DI value;

  /* Check the register alignment.  */
  cpr = check_register_alignment (current_cpu, cpr, 1);

  value = GET_H_CPR (cpr);
  value <<= 32;
  value |= (USI) GET_H_CPR (cpr + 1);
  return value;
}

void
frvbf_h_cpr_double_set_handler (SIM_CPU *current_cpu, UINT cpr, DI newval)
{
  /* Check the register alignment.  */
  cpr = check_register_alignment (current_cpu, cpr, 1);

  SET_H_CPR (cpr    , (newval >> 32) & 0xffffffff);
  SET_H_CPR (cpr + 1, (newval      ) & 0xffffffff);
}
\f
/* Cover fns to write registers as quad words.  */
void
frvbf_h_gr_quad_set_handler (SIM_CPU *current_cpu, UINT gr, SI *newval)
{
  if (gr == 0)
    return; /* Storing into gr0 has no effect.  */

  /* Check the register alignment.  */
  gr = check_register_alignment (current_cpu, gr, 3);

  SET_H_GR (gr    , newval[0]);
  SET_H_GR (gr + 1, newval[1]);
  SET_H_GR (gr + 2, newval[2]);
  SET_H_GR (gr + 3, newval[3]);
}

void
frvbf_h_fr_quad_set_handler (SIM_CPU *current_cpu, UINT fr, SI *newval)
{
  /* Check the register alignment.  */
  fr = check_fr_register_alignment (current_cpu, fr, 3);

  SET_H_FR (fr    , newval[0]);
  SET_H_FR (fr + 1, newval[1]);
  SET_H_FR (fr + 2, newval[2]);
  SET_H_FR (fr + 3, newval[3]);
}

void
frvbf_h_cpr_quad_set_handler (SIM_CPU *current_cpu, UINT cpr, SI *newval)
{
  /* Check the register alignment.  */
  cpr = check_register_alignment (current_cpu, cpr, 3);

  SET_H_CPR (cpr    , newval[0]);
  SET_H_CPR (cpr + 1, newval[1]);
  SET_H_CPR (cpr + 2, newval[2]);
  SET_H_CPR (cpr + 3, newval[3]);
}
\f
/* Cover fns to access the special purpose registers.  */
USI
frvbf_h_spr_get_handler (SIM_CPU *current_cpu, UINT spr)
{
  /* Check access restrictions.  */
  frv_check_spr_read_access (current_cpu, spr);

  switch (spr)
    {
    case H_SPR_PSR:
      return spr_psr_get_handler (current_cpu);
    case H_SPR_TBR:
      return spr_tbr_get_handler (current_cpu);
    case H_SPR_BPSR:
      return spr_bpsr_get_handler (current_cpu);
    case H_SPR_CCR:
      return spr_ccr_get_handler (current_cpu);
    case H_SPR_CCCR:
      return spr_cccr_get_handler (current_cpu);
    case H_SPR_SR0:
    case H_SPR_SR1:
    case H_SPR_SR2:
    case H_SPR_SR3:
      return spr_sr_get_handler (current_cpu, spr);
    default:
      return CPU (h_spr[spr]);
    }
  return 0;
}

void
frvbf_h_spr_set_handler (SIM_CPU *current_cpu, UINT spr, USI newval)
{
  FRV_REGISTER_CONTROL *control;
  USI mask;
  USI oldval;

  /* Check access restrictions.  */
  frv_check_spr_write_access (current_cpu, spr);

  /* Only set those fields which are writeable.  */
  control = CPU_REGISTER_CONTROL (current_cpu);
  mask = control->spr[spr].read_only_mask;
  oldval = GET_H_SPR (spr);

  newval = (newval & ~mask) | (oldval & mask);

  /* Some registers are represented by individual components which are
     referenced more often than the register itself.  */
  switch (spr)
    {
    case H_SPR_PSR:
      spr_psr_set_handler (current_cpu, newval);
      break;
    case H_SPR_TBR:
      spr_tbr_set_handler (current_cpu, newval);
      break;
    case H_SPR_BPSR:
      spr_bpsr_set_handler (current_cpu, newval);
      break;
    case H_SPR_CCR:
      spr_ccr_set_handler (current_cpu, newval);
      break;
    case H_SPR_CCCR:
      spr_cccr_set_handler (current_cpu, newval);
      break;
    case H_SPR_SR0:
    case H_SPR_SR1:
    case H_SPR_SR2:
    case H_SPR_SR3:
      spr_sr_set_handler (current_cpu, spr, newval);
      break;
    case H_SPR_IHSR8:
      frv_cache_reconfigure (current_cpu, CPU_INSN_CACHE (current_cpu));
      break;
    default:
      CPU (h_spr[spr]) = newval;
      break;
    }
}
\f
/* Cover fns to access the gr_hi and gr_lo registers.  */
UHI
frvbf_h_gr_hi_get_handler (SIM_CPU *current_cpu, UINT gr)
{
  return (GET_H_GR (gr) >> 16) & 0xffff;
}

void
frvbf_h_gr_hi_set_handler (SIM_CPU *current_cpu, UINT gr, UHI newval)
{
  USI value = (GET_H_GR (gr) & 0xffff) | (newval << 16);
  SET_H_GR (gr, value);
}

UHI
frvbf_h_gr_lo_get_handler (SIM_CPU *current_cpu, UINT gr)
{
  return GET_H_GR (gr) & 0xffff;
}

void
frvbf_h_gr_lo_set_handler (SIM_CPU *current_cpu, UINT gr, UHI newval)
{
  USI value = (GET_H_GR (gr) & 0xffff0000) | (newval & 0xffff);
  SET_H_GR (gr, value);
}
\f
/* Cover fns to access the tbr bits.  */
USI
spr_tbr_get_handler (SIM_CPU *current_cpu)
{
  int tbr = ((GET_H_TBR_TBA () & 0xfffff) << 12) |
            ((GET_H_TBR_TT () & 0xff) << 4);

  return tbr;
}

void
spr_tbr_set_handler (SIM_CPU *current_cpu, USI newval)
{
  int tbr = newval;

  SET_H_TBR_TBA ((tbr >> 12) & 0xfffff);
  SET_H_TBR_TT ((tbr >> 4) & 0xff);
}
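
/* For illustration, the TBR layout used above: TBA occupies bits 31..12
   and TT bits 11..4, with bits 3..0 always zero.  So TBA = 0x12345 and
   TT = 0x67 pack to 0x12345670, and setting TBR to that value recovers
   the same two fields.  */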
\f
/* Cover fns to access the bpsr bits.  */
USI
spr_bpsr_get_handler (SIM_CPU *current_cpu)
{
  int bpsr = ((GET_H_BPSR_BS () & 0x1) << 12) |
             ((GET_H_BPSR_BET () & 0x1));

  return bpsr;
}

void
spr_bpsr_set_handler (SIM_CPU *current_cpu, USI newval)
{
  int bpsr = newval;

  SET_H_BPSR_BS ((bpsr >> 12) & 1);
  SET_H_BPSR_BET ((bpsr) & 1);
}
\f
/* Cover fns to access the psr bits.  */
USI
spr_psr_get_handler (SIM_CPU *current_cpu)
{
  int psr = ((GET_H_PSR_IMPLE () & 0xf) << 28) |
            ((GET_H_PSR_VER () & 0xf) << 24) |
            ((GET_H_PSR_ICE () & 0x1) << 16) |
            ((GET_H_PSR_NEM () & 0x1) << 14) |
            ((GET_H_PSR_CM () & 0x1) << 13) |
            ((GET_H_PSR_BE () & 0x1) << 12) |
            ((GET_H_PSR_ESR () & 0x1) << 11) |
            ((GET_H_PSR_EF () & 0x1) << 8) |
            ((GET_H_PSR_EM () & 0x1) << 7) |
            ((GET_H_PSR_PIL () & 0xf) << 3) |
            ((GET_H_PSR_S () & 0x1) << 2) |
            ((GET_H_PSR_PS () & 0x1) << 1) |
            ((GET_H_PSR_ET () & 0x1));

  return psr;
}

void
spr_psr_set_handler (SIM_CPU *current_cpu, USI newval)
{
  /* The handler for PSR.S references the value of PSR.ESR, so set PSR.S
     first.  */
  SET_H_PSR_S ((newval >> 2) & 1);

  SET_H_PSR_IMPLE ((newval >> 28) & 0xf);
  SET_H_PSR_VER ((newval >> 24) & 0xf);
  SET_H_PSR_ICE ((newval >> 16) & 1);
  SET_H_PSR_NEM ((newval >> 14) & 1);
  SET_H_PSR_CM ((newval >> 13) & 1);
  SET_H_PSR_BE ((newval >> 12) & 1);
  SET_H_PSR_ESR ((newval >> 11) & 1);
  SET_H_PSR_EF ((newval >> 8) & 1);
  SET_H_PSR_EM ((newval >> 7) & 1);
  SET_H_PSR_PIL ((newval >> 3) & 0xf);
  SET_H_PSR_PS ((newval >> 1) & 1);
  SET_H_PSR_ET ((newval) & 1);
}

void
frvbf_h_psr_s_set_handler (SIM_CPU *current_cpu, BI newval)
{
  /* If switching from user to supervisor mode, or vice-versa, then switch
     the supervisor/user context.  */
  int psr_s = GET_H_PSR_S ();
  if (psr_s != (newval & 1))
    {
      frvbf_switch_supervisor_user_context (current_cpu);
      CPU (h_psr_s) = newval & 1;
    }
}
\f
/* Cover fns to access the ccr bits.  */
USI
spr_ccr_get_handler (SIM_CPU *current_cpu)
{
  int ccr = ((GET_H_ICCR (H_ICCR_ICC3) & 0xf) << 28) |
            ((GET_H_ICCR (H_ICCR_ICC2) & 0xf) << 24) |
            ((GET_H_ICCR (H_ICCR_ICC1) & 0xf) << 20) |
            ((GET_H_ICCR (H_ICCR_ICC0) & 0xf) << 16) |
            ((GET_H_FCCR (H_FCCR_FCC3) & 0xf) << 12) |
            ((GET_H_FCCR (H_FCCR_FCC2) & 0xf) << 8) |
            ((GET_H_FCCR (H_FCCR_FCC1) & 0xf) << 4) |
            ((GET_H_FCCR (H_FCCR_FCC0) & 0xf));

  return ccr;
}

void
spr_ccr_set_handler (SIM_CPU *current_cpu, USI newval)
{
  SET_H_ICCR (H_ICCR_ICC3, (newval >> 28) & 0xf);
  SET_H_ICCR (H_ICCR_ICC2, (newval >> 24) & 0xf);
  SET_H_ICCR (H_ICCR_ICC1, (newval >> 20) & 0xf);
  SET_H_ICCR (H_ICCR_ICC0, (newval >> 16) & 0xf);
  SET_H_FCCR (H_FCCR_FCC3, (newval >> 12) & 0xf);
  SET_H_FCCR (H_FCCR_FCC2, (newval >> 8) & 0xf);
  SET_H_FCCR (H_FCCR_FCC1, (newval >> 4) & 0xf);
  SET_H_FCCR (H_FCCR_FCC0, (newval) & 0xf);
}
\f
QI
frvbf_set_icc_for_shift_right (
  SIM_CPU *current_cpu, SI value, SI shift, QI icc
)
{
  /* Set the C flag of the given icc to the logical OR of the bits shifted
     out.  */
  int mask = (1 << shift) - 1;
  if ((value & mask) != 0)
    return icc | 0x1;

  return icc & 0xe;
}

QI
frvbf_set_icc_for_shift_left (
  SIM_CPU *current_cpu, SI value, SI shift, QI icc
)
{
  /* Set the V flag of the given icc to the logical OR of the bits shifted
     out.  */
  int mask = ((1 << shift) - 1) << (32 - shift);
  if ((value & mask) != 0)
    return icc | 0x2;

  return icc & 0xd;
}
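
/* A worked example of the flag computations above: a right shift of 3
   applied to the value 0b101 loses a nonzero bit pattern (101 & 111),
   so the C bit (0x1) is set; a left shift of 1 applied to 0xc0000000
   loses its top bit (mask 0x80000000), so the V bit (0x2) is set.  */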
\f
/* Cover fns to access the cccr bits.  */
USI
spr_cccr_get_handler (SIM_CPU *current_cpu)
{
  int cccr = ((GET_H_CCCR (H_CCCR_CC7) & 0x3) << 14) |
             ((GET_H_CCCR (H_CCCR_CC6) & 0x3) << 12) |
             ((GET_H_CCCR (H_CCCR_CC5) & 0x3) << 10) |
             ((GET_H_CCCR (H_CCCR_CC4) & 0x3) << 8) |
             ((GET_H_CCCR (H_CCCR_CC3) & 0x3) << 6) |
             ((GET_H_CCCR (H_CCCR_CC2) & 0x3) << 4) |
             ((GET_H_CCCR (H_CCCR_CC1) & 0x3) << 2) |
             ((GET_H_CCCR (H_CCCR_CC0) & 0x3));

  return cccr;
}

void
spr_cccr_set_handler (SIM_CPU *current_cpu, USI newval)
{
  SET_H_CCCR (H_CCCR_CC7, (newval >> 14) & 0x3);
  SET_H_CCCR (H_CCCR_CC6, (newval >> 12) & 0x3);
  SET_H_CCCR (H_CCCR_CC5, (newval >> 10) & 0x3);
  SET_H_CCCR (H_CCCR_CC4, (newval >> 8) & 0x3);
  SET_H_CCCR (H_CCCR_CC3, (newval >> 6) & 0x3);
  SET_H_CCCR (H_CCCR_CC2, (newval >> 4) & 0x3);
  SET_H_CCCR (H_CCCR_CC1, (newval >> 2) & 0x3);
  SET_H_CCCR (H_CCCR_CC0, (newval) & 0x3);
}
\f
/* Cover fns to access the sr bits.  */
USI
spr_sr_get_handler (SIM_CPU *current_cpu, UINT spr)
{
  /* If PSR.ESR is not set, then SR0-3 map onto SGR4-7 which will be GR4-7,
     otherwise the correct mapping of USG4-7 or SGR4-7 will be in SR0-3.  */
  int psr_esr = GET_H_PSR_ESR ();
  if (! psr_esr)
    return GET_H_GR (4 + (spr - H_SPR_SR0));

  return CPU (h_spr[spr]);
}

void
spr_sr_set_handler (SIM_CPU *current_cpu, UINT spr, USI newval)
{
  /* If PSR.ESR is not set, then SR0-3 map onto SGR4-7 which will be GR4-7,
     otherwise the correct mapping of USG4-7 or SGR4-7 will be in SR0-3.  */
  int psr_esr = GET_H_PSR_ESR ();
  if (! psr_esr)
    SET_H_GR (4 + (spr - H_SPR_SR0), newval);
  else
    CPU (h_spr[spr]) = newval;
}
\f
/* Switch SR0-SR3 with GR4-GR7 if PSR.ESR is set.  */
void
frvbf_switch_supervisor_user_context (SIM_CPU *current_cpu)
{
  if (GET_H_PSR_ESR ())
    {
      /* We need to be in supervisor mode to swap the registers.  Access the
         PSR.S directly in order to avoid recursive context switches.  */
      int i;
      int save_psr_s = CPU (h_psr_s);
      CPU (h_psr_s) = 1;
      for (i = 0; i < 4; ++i)
        {
          int gr = i + 4;
          int spr = i + H_SPR_SR0;
          SI tmp = GET_H_SPR (spr);
          SET_H_SPR (spr, GET_H_GR (gr));
          SET_H_GR (gr, tmp);
        }
      CPU (h_psr_s) = save_psr_s;
    }
}
\f
/* Handle load/store of quad registers.  */
void
frvbf_load_quad_GR (SIM_CPU *current_cpu, PCADDR pc, SI address, SI targ_ix)
{
  int i;
  SI value[4];

  /* Check memory alignment.  */
  address = check_memory_alignment (current_cpu, address, 0xf);

  /* If we need to count cycles, then the cache operation will be
     initiated from the model profiling functions.
     See frvbf_model_....  */
  if (model_insn)
    {
      CPU_LOAD_ADDRESS (current_cpu) = address;
      CPU_LOAD_LENGTH (current_cpu) = 16;
    }
  else
    {
      for (i = 0; i < 4; ++i)
        {
          value[i] = frvbf_read_mem_SI (current_cpu, pc, address);
          address += 4;
        }
      sim_queue_fn_xi_write (current_cpu, frvbf_h_gr_quad_set_handler, targ_ix,
                             value);
    }
}

void
frvbf_store_quad_GR (SIM_CPU *current_cpu, PCADDR pc, SI address, SI src_ix)
{
  int i;
  SI value[4];
  USI hsr0;

  /* Check register and memory alignment.  */
  src_ix = check_register_alignment (current_cpu, src_ix, 3);
  address = check_memory_alignment (current_cpu, address, 0xf);

  for (i = 0; i < 4; ++i)
    {
      /* GR0 is always 0.  */
      if (src_ix == 0)
        value[i] = 0;
      else
        value[i] = GET_H_GR (src_ix + i);
    }
  hsr0 = GET_HSR0 ();
  if (GET_HSR0_DCE (hsr0))
    sim_queue_fn_mem_xi_write (current_cpu, frvbf_mem_set_XI, address, value);
  else
    sim_queue_mem_xi_write (current_cpu, address, value);
}

void
frvbf_load_quad_FRint (SIM_CPU *current_cpu, PCADDR pc, SI address, SI targ_ix)
{
  int i;
  SI value[4];

  /* Check memory alignment.  */
  address = check_memory_alignment (current_cpu, address, 0xf);

  /* If we need to count cycles, then the cache operation will be
     initiated from the model profiling functions.
     See frvbf_model_....  */
  if (model_insn)
    {
      CPU_LOAD_ADDRESS (current_cpu) = address;
      CPU_LOAD_LENGTH (current_cpu) = 16;
    }
  else
    {
      for (i = 0; i < 4; ++i)
        {
          value[i] = frvbf_read_mem_SI (current_cpu, pc, address);
          address += 4;
        }
      sim_queue_fn_xi_write (current_cpu, frvbf_h_fr_quad_set_handler, targ_ix,
                             value);
    }
}

void
frvbf_store_quad_FRint (SIM_CPU *current_cpu, PCADDR pc, SI address, SI src_ix)
{
  int i;
  SI value[4];
  USI hsr0;

  /* Check register and memory alignment.  */
  src_ix = check_fr_register_alignment (current_cpu, src_ix, 3);
  address = check_memory_alignment (current_cpu, address, 0xf);

  for (i = 0; i < 4; ++i)
    value[i] = GET_H_FR (src_ix + i);

  hsr0 = GET_HSR0 ();
  if (GET_HSR0_DCE (hsr0))
    sim_queue_fn_mem_xi_write (current_cpu, frvbf_mem_set_XI, address, value);
  else
    sim_queue_mem_xi_write (current_cpu, address, value);
}

void
frvbf_load_quad_CPR (SIM_CPU *current_cpu, PCADDR pc, SI address, SI targ_ix)
{
  int i;
  SI value[4];

  /* Check memory alignment.  */
  address = check_memory_alignment (current_cpu, address, 0xf);

  /* If we need to count cycles, then the cache operation will be
     initiated from the model profiling functions.
     See frvbf_model_....  */
  if (model_insn)
    {
      CPU_LOAD_ADDRESS (current_cpu) = address;
      CPU_LOAD_LENGTH (current_cpu) = 16;
    }
  else
    {
      for (i = 0; i < 4; ++i)
        {
          value[i] = frvbf_read_mem_SI (current_cpu, pc, address);
          address += 4;
        }
      sim_queue_fn_xi_write (current_cpu, frvbf_h_cpr_quad_set_handler, targ_ix,
                             value);
    }
}

void
frvbf_store_quad_CPR (SIM_CPU *current_cpu, PCADDR pc, SI address, SI src_ix)
{
  int i;
  SI value[4];
  USI hsr0;

  /* Check register and memory alignment.  */
  src_ix = check_register_alignment (current_cpu, src_ix, 3);
  address = check_memory_alignment (current_cpu, address, 0xf);

  for (i = 0; i < 4; ++i)
    value[i] = GET_H_CPR (src_ix + i);

  hsr0 = GET_HSR0 ();
  if (GET_HSR0_DCE (hsr0))
    sim_queue_fn_mem_xi_write (current_cpu, frvbf_mem_set_XI, address, value);
  else
    sim_queue_mem_xi_write (current_cpu, address, value);
}
\f
void
frvbf_signed_integer_divide (
  SIM_CPU *current_cpu, SI arg1, SI arg2, int target_index, int non_excepting
)
{
  enum frv_dtt dtt = FRV_DTT_NO_EXCEPTION;
  if (arg1 == 0x80000000 && arg2 == -1)
    {
      /* 0x80000000/(-1) must result in 0x7fffffff when ISR.EDE is set;
         otherwise it may result in 0x7fffffff (sparc compatibility) or
         0x80000000 (C language compatibility).  */
      USI isr;
      dtt = FRV_DTT_OVERFLOW;

      isr = GET_ISR ();
      if (GET_ISR_EDE (isr))
        sim_queue_fn_si_write (current_cpu, frvbf_h_gr_set, target_index,
                               0x7fffffff);
      else
        sim_queue_fn_si_write (current_cpu, frvbf_h_gr_set, target_index,
                               0x80000000);
      frvbf_force_update (current_cpu); /* Force update of target register.  */
    }
  else if (arg2 == 0)
    dtt = FRV_DTT_DIVISION_BY_ZERO;
  else
    sim_queue_fn_si_write (current_cpu, frvbf_h_gr_set, target_index,
                           arg1 / arg2);

  /* Check for exceptions.  */
  if (dtt != FRV_DTT_NO_EXCEPTION)
    dtt = frvbf_division_exception (current_cpu, dtt, target_index,
                                    non_excepting);
  if (non_excepting && dtt == FRV_DTT_NO_EXCEPTION)
    {
      /* Non-excepting instruction.  Clear the NE flag for the target
         register.  */
      SI NE_flags[2];
      GET_NE_FLAGS (NE_flags, H_SPR_GNER0);
      CLEAR_NE_FLAG (NE_flags, target_index);
      SET_NE_FLAGS (H_SPR_GNER0, NE_flags);
    }
}

void
frvbf_unsigned_integer_divide (
  SIM_CPU *current_cpu, USI arg1, USI arg2, int target_index, int non_excepting
)
{
  if (arg2 == 0)
    frvbf_division_exception (current_cpu, FRV_DTT_DIVISION_BY_ZERO,
                              target_index, non_excepting);
  else
    {
      sim_queue_fn_si_write (current_cpu, frvbf_h_gr_set, target_index,
                             arg1 / arg2);
      if (non_excepting)
        {
          /* Non-excepting instruction.  Clear the NE flag for the target
             register.  */
          SI NE_flags[2];
          GET_NE_FLAGS (NE_flags, H_SPR_GNER0);
          CLEAR_NE_FLAG (NE_flags, target_index);
          SET_NE_FLAGS (H_SPR_GNER0, NE_flags);
        }
    }
}
\f
/* Clear accumulators.  */
void
frvbf_clear_accumulators (SIM_CPU *current_cpu, SI acc_ix, int A)
{
  SIM_DESC sd = CPU_STATE (current_cpu);
  int acc_mask =
    (STATE_ARCHITECTURE (sd)->mach == bfd_mach_fr500) ? 7 :
    (STATE_ARCHITECTURE (sd)->mach == bfd_mach_fr550) ? 7 :
    (STATE_ARCHITECTURE (sd)->mach == bfd_mach_fr450) ? 11 :
    (STATE_ARCHITECTURE (sd)->mach == bfd_mach_fr400) ? 3 :
    63;
  FRV_PROFILE_STATE *ps = CPU_PROFILE_STATE (current_cpu);

  ps->mclracc_acc = acc_ix;
  ps->mclracc_A = A;
  if (A == 0 || acc_ix != 0) /* Clear a single accumulator?  */
    {
      /* This instruction is a nop if the referenced accumulator is not
         implemented.  */
      if ((acc_ix & acc_mask) == acc_ix)
        sim_queue_fn_di_write (current_cpu, frvbf_h_acc40S_set, acc_ix, 0);
    }
  else
    {
      /* Clear all implemented accumulators.  */
      int i;
      for (i = 0; i <= acc_mask; ++i)
        if ((i & acc_mask) == i)
          sim_queue_fn_di_write (current_cpu, frvbf_h_acc40S_set, i, 0);
    }
}
\f
/* Functions to aid insn semantics.  */

/* Compute the result of the SCAN and SCANI insns after the shift and xor.  */
SI
frvbf_scan_result (SIM_CPU *current_cpu, SI value)
{
  SI i;
  SI mask;

  if (value == 0)
    return 63;

  /* Find the position of the first non-zero bit.
     The loop will terminate since there is guaranteed to be at least one
     non-zero bit.  */
  mask = 1 << (sizeof (mask) * 8 - 1);
  for (i = 0; (value & mask) == 0; ++i)
    value <<= 1;

  return i;
}
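
/* For illustration: an input of 0x80000000 (top bit already set) yields
   0, an input of 1 yields 31, and 0 yields the special result 63, per
   the cases above.  */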

/* Compute the result of the cut insns.  */
SI
frvbf_cut (SIM_CPU *current_cpu, SI reg1, SI reg2, SI cut_point)
{
  SI result;
  cut_point &= 0x3f;
  if (cut_point < 32)
    {
      result = reg1 << cut_point;
      result |= (reg2 >> (32 - cut_point)) & ((1 << cut_point) - 1);
    }
  else
    result = reg2 << (cut_point - 32);

  return result;
}
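
/* A worked example of the window cut above: with reg1 = 0x01234567,
   reg2 = 0x89abcdef and cut_point = 8, the result is the 32-bit window
   starting 8 bits into the 64-bit concatenation {reg1:reg2}, i.e.
   0x23456789.  The mask strips the sign extension introduced by the
   arithmetic right shift of reg2.  */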

/* Compute the result of the cut insns.  */
SI
frvbf_media_cut (SIM_CPU *current_cpu, DI acc, SI cut_point)
{
  /* The cut point is the lower 6 bits (signed) of what we are passed.  */
  cut_point = cut_point << 26 >> 26;

  /* The cut_point is relative to bit 40 of 64 bits.  */
  if (cut_point >= 0)
    return (acc << (cut_point + 24)) >> 32;

  /* Extend the sign bit (bit 40) for negative cuts.  */
  if (cut_point == -32)
    return (acc << 24) >> 63; /* Special case for full shiftout.  */

  return (acc << 24) >> (32 + -cut_point);
}
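
/* For illustration: the "<< 26 >> 26" sequence sign-extends the low six
   bits of CUT_POINT, giving a range of -32..31.  A cut point of 0 then
   extracts bits 39..8 of the 40-bit accumulator: ACC << 24 places
   accumulator bit 39 at bit 63, and the arithmetic shift right by 32
   keeps bits 39..8, sign-extended from bit 39.  */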

/* Compute the result of the cut insns.  */
SI
frvbf_media_cut_ss (SIM_CPU *current_cpu, DI acc, SI cut_point)
{
  /* The cut point is the lower 6 bits (signed) of what we are passed.  */
  cut_point = cut_point << 26 >> 26;

  if (cut_point >= 0)
    {
      /* The cut_point is relative to bit 40 of 64 bits.  */
      DI shifted = acc << (cut_point + 24);
      DI unshifted = shifted >> (cut_point + 24);

      /* The result will be saturated if significant bits are shifted out.  */
      if (unshifted != acc)
        {
          if (acc < 0)
            return 0x80000000;
          return 0x7fffffff;
        }
    }

  /* The result will not be saturated, so use the code for the normal cut.  */
  return frvbf_media_cut (current_cpu, acc, cut_point);
}

/* Compute the result of int accumulator cut (SCUTSS).  */
SI
frvbf_iacc_cut (SIM_CPU *current_cpu, DI acc, SI cut_point)
{
  DI lower, upper;

  /* The cut point is the lower 7 bits (signed) of what we are passed.  */
  cut_point = cut_point << 25 >> 25;

  /* Conceptually, the operation is on a 128-bit sign-extension of ACC.
     The top bit of the return value corresponds to bit (63 - CUT_POINT)
     of this 128-bit value.

     Since we can't deal with 128-bit values very easily, convert the
     operation into an equivalent 64-bit one.  */
  if (cut_point < 0)
    {
      /* Avoid an undefined shift operation.  */
      if (cut_point == -64)
        acc >>= 63;
      else
        acc >>= -cut_point;
      cut_point = 0;
    }

  /* Get the shifted but unsaturated result.  Set LOWER to the lowest
     32 bits of the result and UPPER to the result >> 31.  */
  if (cut_point < 32)
    {
      /* The cut loses the (32 - CUT_POINT) least significant bits.
         Round the result up if the most significant of these lost bits
         is 1.  */
      lower = acc >> (32 - cut_point);
      if (lower < 0x7fffffff)
        if (acc & LSBIT64 (32 - cut_point - 1))
          lower++;
      upper = lower >> 31;
    }
  else
    {
      lower = acc << (cut_point - 32);
      upper = acc >> (63 - cut_point);
    }

  /* Saturate the result.  */
  if (upper < -1)
    return ~0x7fffffff;
  else if (upper > 0)
    return 0x7fffffff;
  else
    return lower;
}

/* Compute the result of shift-left-arithmetic-with-saturation (SLASS).  */
SI
frvbf_shift_left_arith_saturate (SIM_CPU *current_cpu, SI arg1, SI arg2)
{
  int neg_arg1;

  /* FIXME: what to do with negative shift amt?  */
  if (arg2 <= 0)
    return arg1;

  if (arg1 == 0)
    return 0;

  /* Signed shift by 31 or greater saturates by definition.  */
  if (arg2 >= 31)
    {
      if (arg1 > 0)
        return (SI) 0x7fffffff;
      else
        return (SI) 0x80000000;
    }

  /* OK, arg2 is between 1 and 31.  */
  neg_arg1 = (arg1 < 0);
  do {
    arg1 <<= 1;
    /* Check for sign bit change (saturation).  */
    if (neg_arg1 && (arg1 >= 0))
      return (SI) 0x80000000;
    else if (!neg_arg1 && (arg1 < 0))
      return (SI) 0x7fffffff;
  } while (--arg2 > 0);

  return arg1;
}
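
/* An example of the saturation check above: shifting 0x40000000 left by
   one flips the sign bit, so the loop returns the saturated positive
   value 0x7fffffff instead of the wrapped result 0x80000000.  */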

/* Simulate the media custom insns.  */
void
frvbf_media_cop (SIM_CPU *current_cpu, int cop_num)
{
  /* The semantics of the insn are a nop, since it is implementation defined.
     We do need to check whether it's implemented and set up for MTRAP
     if it's not.  */
  USI msr0 = GET_MSR (0);
  if (GET_MSR_EMCI (msr0) == 0)
    {
      /* No interrupt is queued at this time.  */
      frv_set_mp_exception_registers (current_cpu, MTT_UNIMPLEMENTED_MPOP, 0);
    }
}

/* Simulate the media average (MAVEH) insn.  */
static HI
do_media_average (SIM_CPU *current_cpu, HI arg1, HI arg2)
{
  SIM_DESC sd = CPU_STATE (current_cpu);
  SI sum = (arg1 + arg2);
  HI result = sum >> 1;
  int rounding_value;

  /* On fr4xx and fr550, check the rounding mode.  On other machines
     rounding is always toward negative infinity and the result is
     already correctly rounded.  */
  switch (STATE_ARCHITECTURE (sd)->mach)
    {
    /* Need to check rounding mode.  */
    case bfd_mach_fr400:
    case bfd_mach_fr450:
    case bfd_mach_fr550:
      /* Check whether rounding will be required.  Rounding will be required
         if the sum is an odd number.  */
      rounding_value = sum & 1;
      if (rounding_value)
        {
          USI msr0 = GET_MSR (0);
          /* Check MSR0.SRDAV to determine which bits control the rounding.  */
          if (GET_MSR_SRDAV (msr0))
            {
              /* MSR0.RD controls rounding.  */
              switch (GET_MSR_RD (msr0))
                {
                case 0:
                  /* Round to nearest.  */
                  if (result >= 0)
                    ++result;
                  break;
                case 1:
                  /* Round toward 0.  */
                  if (result < 0)
                    ++result;
                  break;
                case 2:
                  /* Round toward positive infinity.  */
                  ++result;
                  break;
                case 3:
                  /* Round toward negative infinity.  The result is already
                     correctly rounded.  */
                  break;
                default:
                  abort ();
                  break;
                }
            }
          else
            {
              /* MSR0.RDAV controls rounding.  If set, round toward positive
                 infinity.  Otherwise the result is already rounded correctly
                 toward negative infinity.  */
              if (GET_MSR_RDAV (msr0))
                ++result;
            }
        }
      break;
    default:
      break;
    }

  return result;
}

SI
frvbf_media_average (SIM_CPU *current_cpu, SI reg1, SI reg2)
{
  SI result;
  result = do_media_average (current_cpu, reg1 & 0xffff, reg2 & 0xffff);
  result &= 0xffff;
  result |= do_media_average (current_cpu, (reg1 >> 16) & 0xffff,
                              (reg2 >> 16) & 0xffff) << 16;
  return result;
}

/* Maintain a flag in order to know when to write the address of the next
   VLIW instruction into the LR register.  Used by JMPL, JMPIL, and CALL.  */
void
frvbf_set_write_next_vliw_addr_to_LR (SIM_CPU *current_cpu, int value)
{
  frvbf_write_next_vliw_addr_to_LR = value;
}

void
frvbf_set_ne_index (SIM_CPU *current_cpu, int index)
{
  USI NE_flags[2];

  /* Save the target register so interrupt processing can set its NE flag
     in the event of an exception.  */
  frv_interrupt_state.ne_index = index;

  /* Clear the NE flag of the target register.  It will be reset if necessary
     in the event of an exception.  */
  GET_NE_FLAGS (NE_flags, H_SPR_FNER0);
  CLEAR_NE_FLAG (NE_flags, index);
  SET_NE_FLAGS (H_SPR_FNER0, NE_flags);
}

void
frvbf_force_update (SIM_CPU *current_cpu)
{
  CGEN_WRITE_QUEUE *q = CPU_WRITE_QUEUE (current_cpu);
  int ix = CGEN_WRITE_QUEUE_INDEX (q);
  if (ix > 0)
    {
      CGEN_WRITE_QUEUE_ELEMENT *item = CGEN_WRITE_QUEUE_ELEMENT (q, ix - 1);
      item->flags |= FRV_WRITE_QUEUE_FORCE_WRITE;
    }
}
\f
/* Condition code logic.  */
enum cr_ops {
  andcr, orcr, xorcr, nandcr, norcr, andncr, orncr, nandncr, norncr,
  num_cr_ops
};

enum cr_result {cr_undefined, cr_undefined1, cr_false, cr_true};

static enum cr_result
cr_logic[num_cr_ops][4][4] = {
  /* andcr */
  {
    /*                undefined     undefined     false         true  */
    /* undefined */ {cr_undefined, cr_undefined, cr_undefined, cr_undefined},
    /* undefined */ {cr_undefined, cr_undefined, cr_undefined, cr_undefined},
    /* false     */ {cr_undefined, cr_undefined, cr_undefined, cr_undefined},
    /* true      */ {cr_undefined, cr_undefined, cr_false,     cr_true     }
  },
  /* orcr */
  {
    /*                undefined     undefined     false         true  */
    /* undefined */ {cr_undefined, cr_undefined, cr_false,     cr_true     },
    /* undefined */ {cr_undefined, cr_undefined, cr_false,     cr_true     },
    /* false     */ {cr_false,     cr_false,     cr_false,     cr_true     },
    /* true      */ {cr_true,      cr_true,      cr_true,      cr_true     }
  },
  /* xorcr */
  {
    /*                undefined     undefined     false         true  */
    /* undefined */ {cr_undefined, cr_undefined, cr_undefined, cr_undefined},
    /* undefined */ {cr_undefined, cr_undefined, cr_undefined, cr_undefined},
    /* false     */ {cr_undefined, cr_undefined, cr_false,     cr_true     },
    /* true      */ {cr_true,      cr_true,      cr_true,      cr_false    }
  },
  /* nandcr */
  {
    /*                undefined     undefined     false         true  */
    /* undefined */ {cr_undefined, cr_undefined, cr_undefined, cr_undefined},
    /* undefined */ {cr_undefined, cr_undefined, cr_undefined, cr_undefined},
    /* false     */ {cr_undefined, cr_undefined, cr_undefined, cr_undefined},
    /* true      */ {cr_undefined, cr_undefined, cr_true,      cr_false    }
  },
  /* norcr */
  {
    /*                undefined     undefined     false         true  */
    /* undefined */ {cr_undefined, cr_undefined, cr_true,      cr_false    },
    /* undefined */ {cr_undefined, cr_undefined, cr_true,      cr_false    },
    /* false     */ {cr_true,      cr_true,      cr_true,      cr_false    },
    /* true      */ {cr_false,     cr_false,     cr_false,     cr_false    }
  },
  /* andncr */
  {
    /*                undefined     undefined     false         true  */
    /* undefined */ {cr_undefined, cr_undefined, cr_undefined, cr_undefined},
    /* undefined */ {cr_undefined, cr_undefined, cr_undefined, cr_undefined},
    /* false     */ {cr_undefined, cr_undefined, cr_false,     cr_true     },
    /* true      */ {cr_undefined, cr_undefined, cr_undefined, cr_undefined}
  },
  /* orncr */
  {
    /*                undefined     undefined     false         true  */
    /* undefined */ {cr_undefined, cr_undefined, cr_false,     cr_true     },
    /* undefined */ {cr_undefined, cr_undefined, cr_false,     cr_true     },
    /* false     */ {cr_true,      cr_true,      cr_true,      cr_true     },
    /* true      */ {cr_false,     cr_false,     cr_false,     cr_true     }
  },
  /* nandncr */
  {
    /*                undefined     undefined     false         true  */
    /* undefined */ {cr_undefined, cr_undefined, cr_undefined, cr_undefined},
    /* undefined */ {cr_undefined, cr_undefined, cr_undefined, cr_undefined},
    /* false     */ {cr_undefined, cr_undefined, cr_true,      cr_false    },
    /* true      */ {cr_undefined, cr_undefined, cr_undefined, cr_undefined}
  },
  /* norncr */
  {
    /*                undefined     undefined     false         true  */
    /* undefined */ {cr_undefined, cr_undefined, cr_true,      cr_false    },
    /* undefined */ {cr_undefined, cr_undefined, cr_true,      cr_false    },
    /* false     */ {cr_false,     cr_false,     cr_false,     cr_false    },
    /* true      */ {cr_true,      cr_true,      cr_true,      cr_false    }
  }
};

UQI
frvbf_cr_logic (SIM_CPU *current_cpu, SI operation, UQI arg1, UQI arg2)
{
  return cr_logic[operation][arg1][arg2];
}
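
/* A worked lookup in the tables above: the condition fields encode 2 as
   false and 3 as true (0 and 1 are undefined, per enum cr_result), so
   frvbf_cr_logic (current_cpu, andcr, cr_true, cr_false) indexes
   cr_logic[andcr][3][2] and returns cr_false.  */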
\f
/* Cache Manipulation.  */
void
frvbf_insn_cache_preload (SIM_CPU *current_cpu, SI address, USI length, int lock)
{
  /* If we need to count cycles, then the cache operation will be
     initiated from the model profiling functions.
     See frvbf_model_....  */
  int hsr0 = GET_HSR0 ();
  if (GET_HSR0_ICE (hsr0))
    {
      if (model_insn)
        {
          CPU_LOAD_ADDRESS (current_cpu) = address;
          CPU_LOAD_LENGTH (current_cpu) = length;
          CPU_LOAD_LOCK (current_cpu) = lock;
        }
      else
        {
          FRV_CACHE *cache = CPU_INSN_CACHE (current_cpu);
          frv_cache_preload (cache, address, length, lock);
        }
    }
}

void
frvbf_data_cache_preload (SIM_CPU *current_cpu, SI address, USI length, int lock)
{
  /* If we need to count cycles, then the cache operation will be
     initiated from the model profiling functions.
     See frvbf_model_....  */
  int hsr0 = GET_HSR0 ();
  if (GET_HSR0_DCE (hsr0))
    {
      if (model_insn)
        {
          CPU_LOAD_ADDRESS (current_cpu) = address;
          CPU_LOAD_LENGTH (current_cpu) = length;
          CPU_LOAD_LOCK (current_cpu) = lock;
        }
      else
        {
          FRV_CACHE *cache = CPU_DATA_CACHE (current_cpu);
          frv_cache_preload (cache, address, length, lock);
        }
    }
}

void
frvbf_insn_cache_unlock (SIM_CPU *current_cpu, SI address)
{
  /* If we need to count cycles, then the cache operation will be
     initiated from the model profiling functions.
     See frvbf_model_....  */
  int hsr0 = GET_HSR0 ();
  if (GET_HSR0_ICE (hsr0))
    {
      if (model_insn)
        CPU_LOAD_ADDRESS (current_cpu) = address;
      else
        {
          FRV_CACHE *cache = CPU_INSN_CACHE (current_cpu);
          frv_cache_unlock (cache, address);
        }
    }
}

void
frvbf_data_cache_unlock (SIM_CPU *current_cpu, SI address)
{
  /* If we need to count cycles, then the cache operation will be
     initiated from the model profiling functions.
     See frvbf_model_....  */
  int hsr0 = GET_HSR0 ();
  if (GET_HSR0_DCE (hsr0))
    {
      if (model_insn)
        CPU_LOAD_ADDRESS (current_cpu) = address;
      else
        {
          FRV_CACHE *cache = CPU_DATA_CACHE (current_cpu);
          frv_cache_unlock (cache, address);
        }
    }
}

void
frvbf_insn_cache_invalidate (SIM_CPU *current_cpu, SI address, int all)
{
  /* Make sure the insn was specified properly.  -1 will be passed for ALL
     for an icei with A=0.  */
  if (all == -1)
    {
      frv_queue_program_interrupt (current_cpu, FRV_ILLEGAL_INSTRUCTION);
      return;
    }

  /* If we need to count cycles, then the cache operation will be
     initiated from the model profiling functions.
     See frvbf_model_....  */
  if (model_insn)
    {
      /* Record the all-entries flag for use in profiling.  */
      FRV_PROFILE_STATE *ps = CPU_PROFILE_STATE (current_cpu);
      ps->all_cache_entries = all;
      CPU_LOAD_ADDRESS (current_cpu) = address;
    }
  else
    {
      FRV_CACHE *cache = CPU_INSN_CACHE (current_cpu);
      if (all)
        frv_cache_invalidate_all (cache, 0/* flush? */);
      else
        frv_cache_invalidate (cache, address, 0/* flush? */);
    }
}

void
frvbf_data_cache_invalidate (SIM_CPU *current_cpu, SI address, int all)
{
  /* Make sure the insn was specified properly.  -1 will be passed for ALL
     for a dcei with A=0.  */
  if (all == -1)
    {
      frv_queue_program_interrupt (current_cpu, FRV_ILLEGAL_INSTRUCTION);
      return;
    }

  /* If we need to count cycles, then the cache operation will be
     initiated from the model profiling functions.
     See frvbf_model_....  */
  if (model_insn)
    {
      /* Record the all-entries flag for use in profiling.  */
      FRV_PROFILE_STATE *ps = CPU_PROFILE_STATE (current_cpu);
      ps->all_cache_entries = all;
      CPU_LOAD_ADDRESS (current_cpu) = address;
    }
  else
    {
      FRV_CACHE *cache = CPU_DATA_CACHE (current_cpu);
      if (all)
        frv_cache_invalidate_all (cache, 0/* flush? */);
      else
        frv_cache_invalidate (cache, address, 0/* flush? */);
    }
}

void
frvbf_data_cache_flush (SIM_CPU *current_cpu, SI address, int all)
{
  /* Make sure the insn was specified properly.  -1 will be passed for ALL
     for a dcef with A=0.  */
  if (all == -1)
    {
      frv_queue_program_interrupt (current_cpu, FRV_ILLEGAL_INSTRUCTION);
      return;
    }

  /* If we need to count cycles, then the cache operation will be
     initiated from the model profiling functions.
     See frvbf_model_....  */
  if (model_insn)
    {
      /* Record the all-entries flag for use in profiling.  */
      FRV_PROFILE_STATE *ps = CPU_PROFILE_STATE (current_cpu);
      ps->all_cache_entries = all;
      CPU_LOAD_ADDRESS (current_cpu) = address;
    }
  else
    {
      FRV_CACHE *cache = CPU_DATA_CACHE (current_cpu);
      if (all)
        frv_cache_invalidate_all (cache, 1/* flush? */);
      else
        frv_cache_invalidate (cache, address, 1/* flush? */);
    }
}