/* frv trap support
   Copyright (C) 1999, 2000, 2001 Free Software Foundation, Inc.
   Contributed by Red Hat.

This file is part of the GNU simulators.

This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 2, or (at your option)
any later version.

This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
GNU General Public License for more details.

You should have received a copy of the GNU General Public License along
with this program; if not, write to the Free Software Foundation, Inc.,
59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.  */

#define WANT_CPU frvbf
#define WANT_CPU_FRVBF

#include "sim-main.h"
#include "targ-vals.h"
#include "cgen-engine.h"
#include "cgen-par.h"
#include "sim-fpu.h"

#include "bfd.h"
#include "libiberty.h"
/* The semantic code invokes this for invalid (unrecognized) instructions.  */

SEM_PC
sim_engine_invalid_insn (SIM_CPU *current_cpu, IADDR cia, SEM_PC vpc)
{
  frv_queue_program_interrupt (current_cpu, FRV_ILLEGAL_INSTRUCTION);
  return vpc;
}

/* Process an address exception.  */

void
frv_core_signal (SIM_DESC sd, SIM_CPU *current_cpu, sim_cia cia,
                 unsigned int map, int nr_bytes, address_word addr,
                 transfer_type transfer, sim_core_signals sig)
{
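  /* Only the unaligned-access signal gets frv-specific treatment here: the
     fr400 reports it as a data_access_error, the other variants as
     mem_address_not_aligned.  Everything else falls straight through to the
     generic core-signal handler below.  */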
  if (sig == sim_core_unaligned_signal)
    {
      if (STATE_ARCHITECTURE (sd)->mach == bfd_mach_fr400)
        frv_queue_data_access_error_interrupt (current_cpu, addr);
      else
        frv_queue_mem_address_not_aligned_interrupt (current_cpu, addr);
    }

  frv_term (sd);
  sim_core_signal (sd, current_cpu, cia, map, nr_bytes, addr, transfer, sig);
}

void
frv_sim_engine_halt_hook (SIM_DESC sd, SIM_CPU *current_cpu, sim_cia cia)
{
  int i;
  if (current_cpu != NULL)
    CIA_SET (current_cpu, cia);

  /* Invalidate the insn and data caches of all cpus.  */
  for (i = 0; i < MAX_NR_PROCESSORS; ++i)
    {
      current_cpu = STATE_CPU (sd, i);
      frv_cache_invalidate_all (CPU_INSN_CACHE (current_cpu), 0);
      frv_cache_invalidate_all (CPU_DATA_CACHE (current_cpu), 1);
    }
  frv_term (sd);
}

/* Read/write functions for system call interface.  */

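/* These callbacks are handed to cb_syscall below via the CB_SYSCALL
   structure.  The data cache is flushed (and the insn cache invalidated on
   writes) so that host-side reads and writes of target memory stay coherent
   with what the simulated program sees through its caches.  */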
static int
syscall_read_mem (host_callback *cb, struct cb_syscall *sc,
                  unsigned long taddr, char *buf, int bytes)
{
  SIM_DESC sd = (SIM_DESC) sc->p1;
  SIM_CPU *cpu = (SIM_CPU *) sc->p2;

  frv_cache_invalidate_all (CPU_DATA_CACHE (cpu), 1);
  return sim_core_read_buffer (sd, cpu, read_map, buf, taddr, bytes);
}

static int
syscall_write_mem (host_callback *cb, struct cb_syscall *sc,
                   unsigned long taddr, const char *buf, int bytes)
{
  SIM_DESC sd = (SIM_DESC) sc->p1;
  SIM_CPU *cpu = (SIM_CPU *) sc->p2;

  frv_cache_invalidate_all (CPU_INSN_CACHE (cpu), 0);
  frv_cache_invalidate_all (CPU_DATA_CACHE (cpu), 1);
  return sim_core_write_buffer (sd, cpu, write_map, buf, taddr, bytes);
}

/* Handle TRA and TIRA insns.  */
void
frv_itrap (SIM_CPU *current_cpu, PCADDR pc, USI base, SI offset)
{
  SIM_DESC sd = CPU_STATE (current_cpu);
  host_callback *cb = STATE_CALLBACK (sd);
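  /* Software traps occupy the upper half of the 256-entry trap vector table:
     the low seven bits of (base + offset) select one of 128 entries starting
     at vector 0x80.  */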
  USI num = ((base + offset) & 0x7f) + 0x80;

#ifdef SIM_HAVE_BREAKPOINTS
  /* Check for breakpoints "owned" by the simulator first, regardless
     of --environment.  */
  if (num == TRAP_BREAKPOINT)
    {
      /* First try sim-break.c.  If it's a breakpoint the simulator "owns"
         it doesn't return.  Otherwise it returns and lets us try.  */
      sim_handle_breakpoint (sd, current_cpu, pc);
      /* Fall through.  */
    }
#endif

  if (STATE_ENVIRONMENT (sd) == OPERATING_ENVIRONMENT)
    {
      frv_queue_software_interrupt (current_cpu, num);
      return;
    }

  switch (num)
    {
    case TRAP_SYSCALL :
      {
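        /* Syscall ABI used by the simulator: gr7 holds the system call
           number and gr8-gr10 the first three arguments; the result, second
           result and error code come back in gr8, gr9 and gr10.  */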
        CB_SYSCALL s;
        CB_SYSCALL_INIT (&s);
        s.func = GET_H_GR (7);
        s.arg1 = GET_H_GR (8);
        s.arg2 = GET_H_GR (9);
        s.arg3 = GET_H_GR (10);

        if (s.func == TARGET_SYS_exit)
          {
            sim_engine_halt (sd, current_cpu, NULL, pc, sim_exited, s.arg1);
          }

        s.p1 = (PTR) sd;
        s.p2 = (PTR) current_cpu;
        s.read_mem = syscall_read_mem;
        s.write_mem = syscall_write_mem;
        cb_syscall (cb, &s);
        SET_H_GR (8, s.result);
        SET_H_GR (9, s.result2);
        SET_H_GR (10, s.errcode);
        break;
      }

    case TRAP_BREAKPOINT:
      sim_engine_halt (sd, current_cpu, NULL, pc, sim_stopped, SIM_SIGTRAP);
      break;

      /* Add support for dumping registers, either at fixed traps, or all
         unknown traps if configured with --enable-sim-trapdump.  */
    default:
#if !TRAPDUMP
      frv_queue_software_interrupt (current_cpu, num);
      return;
#endif

#ifdef TRAP_REGDUMP1
    case TRAP_REGDUMP1:
#endif

#ifdef TRAP_REGDUMP2
    case TRAP_REGDUMP2:
#endif

#if TRAPDUMP || (defined (TRAP_REGDUMP1)) || (defined (TRAP_REGDUMP2))
      {
        char buf[256];
        int i, j;

        buf[0] = 0;
        if (STATE_TEXT_SECTION (sd)
            && pc >= STATE_TEXT_START (sd)
            && pc < STATE_TEXT_END (sd))
          {
            const char *pc_filename = (const char *)0;
            const char *pc_function = (const char *)0;
            unsigned int pc_linenum = 0;

            if (bfd_find_nearest_line (STATE_PROG_BFD (sd),
                                       STATE_TEXT_SECTION (sd),
                                       (struct symbol_cache_entry **) 0,
                                       pc - STATE_TEXT_START (sd),
                                       &pc_filename, &pc_function, &pc_linenum)
                && (pc_function || pc_filename))
              {
                char *p = buf+2;
                buf[0] = ' ';
                buf[1] = '(';
                if (pc_function)
                  {
                    strcpy (p, pc_function);
                    p += strlen (p);
                  }
                else
                  {
                    char *q = (char *) strrchr (pc_filename, '/');
                    strcpy (p, (q) ? q+1 : pc_filename);
                    p += strlen (p);
                  }

                if (pc_linenum)
                  {
                    sprintf (p, " line %d", pc_linenum);
                    p += strlen (p);
                  }

                p[0] = ')';
                p[1] = '\0';
                if ((p+1) - buf > sizeof (buf))
                  abort ();
              }
          }

        sim_io_printf (sd,
                       "\nRegister dump, pc = 0x%.8x%s, base = %u, offset = %d\n",
                       (unsigned)pc, buf, (unsigned)base, (int)offset);

        for (i = 0; i < 64; i += 8)
          {
            long g0 = (long)GET_H_GR (i);
            long g1 = (long)GET_H_GR (i+1);
            long g2 = (long)GET_H_GR (i+2);
            long g3 = (long)GET_H_GR (i+3);
            long g4 = (long)GET_H_GR (i+4);
            long g5 = (long)GET_H_GR (i+5);
            long g6 = (long)GET_H_GR (i+6);
            long g7 = (long)GET_H_GR (i+7);

            if ((g0 | g1 | g2 | g3 | g4 | g5 | g6 | g7) != 0)
              sim_io_printf (sd,
                             "\tgr%02d - gr%02d: 0x%.8lx 0x%.8lx 0x%.8lx 0x%.8lx 0x%.8lx 0x%.8lx 0x%.8lx 0x%.8lx\n",
                             i, i+7, g0, g1, g2, g3, g4, g5, g6, g7);
          }

        for (i = 0; i < 64; i += 8)
          {
            long f0 = (long)GET_H_FR (i);
            long f1 = (long)GET_H_FR (i+1);
            long f2 = (long)GET_H_FR (i+2);
            long f3 = (long)GET_H_FR (i+3);
            long f4 = (long)GET_H_FR (i+4);
            long f5 = (long)GET_H_FR (i+5);
            long f6 = (long)GET_H_FR (i+6);
            long f7 = (long)GET_H_FR (i+7);

            if ((f0 | f1 | f2 | f3 | f4 | f5 | f6 | f7) != 0)
              sim_io_printf (sd,
                             "\tfr%02d - fr%02d: 0x%.8lx 0x%.8lx 0x%.8lx 0x%.8lx 0x%.8lx 0x%.8lx 0x%.8lx 0x%.8lx\n",
                             i, i+7, f0, f1, f2, f3, f4, f5, f6, f7);
          }

        sim_io_printf (sd,
                       "\tlr/lcr/cc/ccc: 0x%.8lx 0x%.8lx 0x%.8lx 0x%.8lx\n",
                       (long)GET_H_SPR (272),
                       (long)GET_H_SPR (273),
                       (long)GET_H_SPR (256),
                       (long)GET_H_SPR (263));
      }
      break;
#endif
    }
}

/* Handle the MTRAP insn.  */
void
frv_mtrap (SIM_CPU *current_cpu)
{
  /* Check the status of media exceptions in MSR0.  */
  SI msr = GET_MSR (0);
  if (GET_MSR_AOVF (msr) || GET_MSR_MTT (msr))
    frv_queue_program_interrupt (current_cpu, FRV_MP_EXCEPTION);
}

/* Handle the BREAK insn.  */
void
frv_break (SIM_CPU *current_cpu)
{
  IADDR pc;
  SIM_DESC sd = CPU_STATE (current_cpu);

#ifdef SIM_HAVE_BREAKPOINTS
  /* First try sim-break.c.  If it's a breakpoint the simulator "owns"
     it doesn't return.  Otherwise it returns and lets us try.  */
  pc = GET_H_PC ();
  sim_handle_breakpoint (sd, current_cpu, pc);
  /* Fall through.  */
#endif

  if (STATE_ENVIRONMENT (sd) != OPERATING_ENVIRONMENT)
    {
      /* Invalidate the insn cache because the debugger will presumably
         replace the breakpoint insn with the real one.  */
#ifndef SIM_HAVE_BREAKPOINTS
      pc = GET_H_PC ();
#endif
      sim_engine_halt (sd, current_cpu, NULL, pc, sim_stopped, SIM_SIGTRAP);
    }

  frv_queue_break_interrupt (current_cpu);
}

/* Return from trap.  */
USI
frv_rett (SIM_CPU *current_cpu, PCADDR pc, BI debug_field)
{
  USI new_pc;
  /* if (normal running mode and debug_field==0)
       PC = PCSR
       PSR.ET = 1
       PSR.S = PSR.PS
     else if (debug running mode and debug_field==1)
       PC = BPCSR
       PSR.ET = BPSR.BET
       PSR.S = BPSR.BS
       change to normal running mode  */
  int psr_s = GET_H_PSR_S ();
  int psr_et = GET_H_PSR_ET ();

  /* Check for exceptions in the priority order listed in the FRV Architecture
     Volume 2.  */
  if (! psr_s)
    {
      /* Halt if PSR.ET is not set.  See chapter 6 of the LSI.  */
      if (! psr_et)
        {
          SIM_DESC sd = CPU_STATE (current_cpu);
          sim_engine_halt (sd, current_cpu, NULL, pc, sim_stopped, SIM_SIGTRAP);
        }

      /* privileged_instruction interrupt will have already been queued by
         frv_detect_insn_access_interrupts.  */
      new_pc = pc + 4;
    }
  else if (psr_et)
    {
      /* Halt if PSR.S is set.  See chapter 6 of the LSI.  */
      if (psr_s)
        {
          SIM_DESC sd = CPU_STATE (current_cpu);
          sim_engine_halt (sd, current_cpu, NULL, pc, sim_stopped, SIM_SIGTRAP);
        }

      frv_queue_program_interrupt (current_cpu, FRV_ILLEGAL_INSTRUCTION);
      new_pc = pc + 4;
    }
  else if (! CPU_DEBUG_STATE (current_cpu) && debug_field == 0)
    {
      USI psr = GET_PSR ();
      /* Return from normal running state.  */
      new_pc = GET_H_SPR (H_SPR_PCSR);
      SET_PSR_ET (psr, 1);
      SET_PSR_S (psr, GET_PSR_PS (psr));
      sim_queue_fn_si_write (current_cpu, frvbf_h_spr_set, H_SPR_PSR, psr);
    }
  else if (CPU_DEBUG_STATE (current_cpu) && debug_field == 1)
    {
      USI psr = GET_PSR ();
      /* Return from debug state.  */
      new_pc = GET_H_SPR (H_SPR_BPCSR);
      SET_PSR_ET (psr, GET_H_BPSR_BET ());
      SET_PSR_S (psr, GET_H_BPSR_BS ());
      sim_queue_fn_si_write (current_cpu, frvbf_h_spr_set, H_SPR_PSR, psr);
      CPU_DEBUG_STATE (current_cpu) = 0;
    }
  else
    new_pc = pc + 4;

  return new_pc;
}

/* Functions for handling non-excepting instruction side effects.  */
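/* Return the index of the next NESR which is not currently in use (its valid
   bit clear), starting just after CURRENT_INDEX, or NO_NESR if there is none.
   The NECR must itself be implemented and valid for any NESRs to exist.  */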
static SI next_available_nesr (SIM_CPU *current_cpu, SI current_index)
{
  FRV_REGISTER_CONTROL *control = CPU_REGISTER_CONTROL (current_cpu);
  if (control->spr[H_SPR_NECR].implemented)
    {
      int limit;
      USI necr = GET_NECR ();

      /* See if any NESRs are implemented.  First need to check the validity
         of the NECR.  */
      if (! GET_NECR_VALID (necr))
        return NO_NESR;

      limit = GET_NECR_NEN (necr);
      for (++current_index; current_index < limit; ++current_index)
        {
          SI nesr = GET_NESR (current_index);
          if (! GET_NESR_VALID (nesr))
            return current_index;
        }
    }
  return NO_NESR;
}

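/* Companion to next_available_nesr: return the index of the next NESR whose
   valid bit is set (i.e. one recording a deferred exception), starting just
   after CURRENT_INDEX, or NO_NESR if there is none.  */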
static SI next_valid_nesr (SIM_CPU *current_cpu, SI current_index)
{
  FRV_REGISTER_CONTROL *control = CPU_REGISTER_CONTROL (current_cpu);
  if (control->spr[H_SPR_NECR].implemented)
    {
      int limit;
      USI necr = GET_NECR ();

      /* See if any NESRs are implemented.  First need to check the validity
         of the NECR.  */
      if (! GET_NECR_VALID (necr))
        return NO_NESR;

      limit = GET_NECR_NEN (necr);
      for (++current_index; current_index < limit; ++current_index)
        {
          SI nesr = GET_NESR (current_index);
          if (GET_NESR_VALID (nesr))
            return current_index;
        }
    }
  return NO_NESR;
}

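/* Decide whether a non-excepting load should actually be performed.  Returns
   1 to perform the load, 0 to suppress it.  Alignment problems with the
   address or the target register are not raised as interrupts here; they are
   recorded in the NE flags and, when the NECR enables exception logging, in
   an NESR/NEEAR pair.  The load is suppressed when an input register already
   has its NE flag set, so the "no result" state propagates.  */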
BI
frvbf_check_non_excepting_load (
  SIM_CPU *current_cpu, SI base_index, SI disp_index, SI target_index,
  SI immediate_disp, QI data_size, BI is_float
)
{
  BI rc = 1; /* perform the load.  */
  SIM_DESC sd = CPU_STATE (current_cpu);
  int daec = 0;
  int rec = 0;
  int ec = 0;
  USI necr;
  int do_elos;
  SI NE_flags[2];
  SI NE_base;
  SI nesr;
  SI ne_index;
  FRV_REGISTER_CONTROL *control;

  SI address = GET_H_GR (base_index);
  if (disp_index >= 0)
    address += GET_H_GR (disp_index);
  else
    address += immediate_disp;

  /* Check for interrupt factors.  */
  switch (data_size)
    {
    case NESR_UQI_SIZE:
    case NESR_QI_SIZE:
      break;
    case NESR_UHI_SIZE:
    case NESR_HI_SIZE:
      if (address & 1)
        ec = 1;
      break;
    case NESR_SI_SIZE:
      if (address & 3)
        ec = 1;
      break;
    case NESR_DI_SIZE:
      if (address & 7)
        ec = 1;
      if (target_index & 1)
        rec = 1;
      break;
    case NESR_XI_SIZE:
      if (address & 0xf)
        ec = 1;
      if (target_index & 3)
        rec = 1;
      break;
    default:
      {
        IADDR pc = GET_H_PC ();
        sim_engine_abort (sd, current_cpu, pc,
                          "check_non_excepting_load: Incorrect data_size\n");
        break;
      }
    }

  control = CPU_REGISTER_CONTROL (current_cpu);
  if (control->spr[H_SPR_NECR].implemented)
    {
      necr = GET_NECR ();
      do_elos = GET_NECR_VALID (necr) && GET_NECR_ELOS (necr);
    }
  else
    do_elos = 0;

  /* NECR, NESR, NEEAR are only implemented for the full frv machine.  */
  if (do_elos)
    {
      ne_index = next_available_nesr (current_cpu, NO_NESR);
      if (ne_index == NO_NESR)
        {
          IADDR pc = GET_H_PC ();
          sim_engine_abort (sd, current_cpu, pc,
                            "No available NESR register\n");
        }

      /* Fill in the basic fields of the NESR.  */
      nesr = GET_NESR (ne_index);
      SET_NESR_VALID (nesr);
      SET_NESR_EAV (nesr);
      SET_NESR_DRN (nesr, target_index);
      SET_NESR_SIZE (nesr, data_size);
      SET_NESR_NEAN (nesr, ne_index);
      if (is_float)
        SET_NESR_FR (nesr);
      else
        CLEAR_NESR_FR (nesr);

      /* Set the corresponding NEEAR.  */
      SET_NEEAR (ne_index, address);

      SET_NESR_DAEC (nesr, 0);
      SET_NESR_REC (nesr, 0);
      SET_NESR_EC (nesr, 0);
    }

  /* Set the NE flag corresponding to the target register if an interrupt
     factor was detected.
     daec is not checked here yet, but is declared for future reference.  */
  if (is_float)
    NE_base = H_SPR_FNER0;
  else
    NE_base = H_SPR_GNER0;

  GET_NE_FLAGS (NE_flags, NE_base);
  if (rec)
    {
      SET_NE_FLAG (NE_flags, target_index);
      if (do_elos)
        SET_NESR_REC (nesr, NESR_REGISTER_NOT_ALIGNED);
    }

  if (ec)
    {
      SET_NE_FLAG (NE_flags, target_index);
      if (do_elos)
        SET_NESR_EC (nesr, NESR_MEM_ADDRESS_NOT_ALIGNED);
    }

  if (do_elos)
    SET_NESR (ne_index, nesr);

  /* If no interrupt factor was detected then set the NE flag on the
     target register if the NE flag on one of the input registers
     is already set.  */
  if (! rec && ! ec && ! daec)
    {
      BI ne_flag = GET_NE_FLAG (NE_flags, base_index);
      if (disp_index >= 0)
        ne_flag |= GET_NE_FLAG (NE_flags, disp_index);
      if (ne_flag)
        {
          SET_NE_FLAG (NE_flags, target_index);
          rc = 0; /* Do not perform the load.  */
        }
      else
        CLEAR_NE_FLAG (NE_flags, target_index);
    }

  SET_NE_FLAGS (NE_base, NE_flags);

  return rc; /* perform the load?  */
}

/* Record state for media exception: media_cr_not_aligned.  */
void
frvbf_media_cr_not_aligned (SIM_CPU *current_cpu)
{
  SIM_DESC sd = CPU_STATE (current_cpu);

  /* On the fr400 this generates an illegal_instruction interrupt.  */
  if (STATE_ARCHITECTURE (sd)->mach == bfd_mach_fr400)
    frv_queue_program_interrupt (current_cpu, FRV_ILLEGAL_INSTRUCTION);
  else
    frv_set_mp_exception_registers (current_cpu, MTT_CR_NOT_ALIGNED, 0);
}

/* Record state for media exception: media_acc_not_aligned.  */
void
frvbf_media_acc_not_aligned (SIM_CPU *current_cpu)
{
  SIM_DESC sd = CPU_STATE (current_cpu);

  /* On the fr400 this generates an illegal_instruction interrupt.  */
  if (STATE_ARCHITECTURE (sd)->mach == bfd_mach_fr400)
    frv_queue_program_interrupt (current_cpu, FRV_ILLEGAL_INSTRUCTION);
  else
    frv_set_mp_exception_registers (current_cpu, MTT_ACC_NOT_ALIGNED, 0);
}

/* Record state for media exception: media_register_not_aligned.  */
void
frvbf_media_register_not_aligned (SIM_CPU *current_cpu)
{
  SIM_DESC sd = CPU_STATE (current_cpu);

  /* On the fr400 this generates an illegal_instruction interrupt.  */
  if (STATE_ARCHITECTURE (sd)->mach == bfd_mach_fr400)
    frv_queue_program_interrupt (current_cpu, FRV_ILLEGAL_INSTRUCTION);
  else
    frv_set_mp_exception_registers (current_cpu, MTT_INVALID_FR, 0);
}

/* Record state for media exception: media_overflow.  */
void
frvbf_media_overflow (SIM_CPU *current_cpu, int sie)
{
  frv_set_mp_exception_registers (current_cpu, MTT_OVERFLOW, sie);
}

/* Queue a division exception.  */
enum frv_dtt
frvbf_division_exception (SIM_CPU *current_cpu, enum frv_dtt dtt,
                          int target_index, int non_excepting)
{
  /* If there was an overflow and it is masked, then record it in
     ISR.AEXC.  */
  USI isr = GET_ISR ();
  if ((dtt & FRV_DTT_OVERFLOW) && GET_ISR_EDE (isr))
    {
      dtt &= ~FRV_DTT_OVERFLOW;
      SET_ISR_AEXC (isr);
      SET_ISR (isr);
    }
  if (dtt != FRV_DTT_NO_EXCEPTION)
    {
      if (non_excepting)
        {
          /* Non excepting instruction, simply set the NE flag for the target
             register.  */
          SI NE_flags[2];
          GET_NE_FLAGS (NE_flags, H_SPR_GNER0);
          SET_NE_FLAG (NE_flags, target_index);
          SET_NE_FLAGS (H_SPR_GNER0, NE_flags);
        }
      else
        frv_queue_division_exception_interrupt (current_cpu, dtt);
    }
  return dtt;
}

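/* Called when a store is performed: if the store address matches an address
   recorded in an active NEEAR, the block containing it is invalidated in the
   data cache and the stored value is also copied into the register(s) named
   by NESR.DRN, which is what makes the store "recovering".  */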
void
frvbf_check_recovering_store (
  SIM_CPU *current_cpu, PCADDR address, SI regno, int size, int is_float
)
{
  FRV_CACHE *cache = CPU_DATA_CACHE (current_cpu);
  int reg_ix;

  CPU_RSTR_INVALIDATE(current_cpu) = 0;

  for (reg_ix = next_valid_nesr (current_cpu, NO_NESR);
       reg_ix != NO_NESR;
       reg_ix = next_valid_nesr (current_cpu, reg_ix))
    {
      if (address == GET_H_SPR (H_SPR_NEEAR0 + reg_ix))
        {
          SI nesr = GET_NESR (reg_ix);
          int nesr_drn = GET_NESR_DRN (nesr);
          BI nesr_fr = GET_NESR_FR (nesr);
          SI remain;

          /* Invalidate cache block containing this address.
             If we need to count cycles, then the cache operation will be
             initiated from the model profiling functions.
             See frvbf_model_....  */
          if (model_insn)
            {
              CPU_RSTR_INVALIDATE(current_cpu) = 1;
              CPU_LOAD_ADDRESS (current_cpu) = address;
            }
          else
            frv_cache_invalidate (cache, address, 1/* flush */);

          /* Copy the stored value to the register indicated by NESR.DRN.  */
          for (remain = size; remain > 0; remain -= 4)
            {
              SI value;

              if (is_float)
                value = GET_H_FR (regno);
              else
                value = GET_H_GR (regno);

              switch (size)
                {
                case 1:
                  value &= 0xff;
                  break;
                case 2:
                  value &= 0xffff;
                  break;
                default:
                  break;
                }

              if (nesr_fr)
                sim_queue_fn_sf_write (current_cpu, frvbf_h_fr_set, nesr_drn,
                                       value);
              else
                sim_queue_fn_si_write (current_cpu, frvbf_h_gr_set, nesr_drn,
                                       value);

              nesr_drn++;
              regno++;
            }
          break; /* Only consider the first matching register.  */
        }
    } /* loop over active neear registers.  */
}

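/* Invalidate the NESR/NEEAR pairs recording deferred exceptions for
   TARGET_INDEX in the given register class (FR when IS_FLOAT, GR otherwise).
   A negative TARGET_INDEX clears every pair in that class.  */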
static void
clear_nesr_neear (SIM_CPU *current_cpu, SI target_index, BI is_float)
{
  int reg_ix;

  /* Only implemented for full frv.  */
  SIM_DESC sd = CPU_STATE (current_cpu);
  if (STATE_ARCHITECTURE (sd)->mach != bfd_mach_frv)
    return;

  /* Clear the appropriate NESR and NEEAR registers.  */
  for (reg_ix = next_valid_nesr (current_cpu, NO_NESR);
       reg_ix != NO_NESR;
       reg_ix = next_valid_nesr (current_cpu, reg_ix))
    {
      SI nesr;
      /* The register is available, now check if it is active.  */
      nesr = GET_NESR (reg_ix);
      if (GET_NESR_FR (nesr) == is_float)
        {
          if (target_index < 0 || GET_NESR_DRN (nesr) == target_index)
            {
              SET_NESR (reg_ix, 0);
              SET_NEEAR (reg_ix, 0);
            }
        }
    }
}

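/* Clear the NE flag for TARGET_INDEX in the flag registers based at NE_BASE.
   A negative TARGET_INDEX clears all of the flags in whichever halves of the
   register file are available.  */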
static void
clear_ne_flags (
  SIM_CPU *current_cpu,
  SI target_index,
  int hi_available,
  int lo_available,
  SI NE_base
)
{
  SI NE_flags[2];
  int exception;

  GET_NE_FLAGS (NE_flags, NE_base);
  if (target_index >= 0)
    CLEAR_NE_FLAG (NE_flags, target_index);
  else
    {
      if (lo_available)
        NE_flags[1] = 0;
      if (hi_available)
        NE_flags[0] = 0;
    }
  SET_NE_FLAGS (NE_base, NE_flags);
}

/* Determine which halves of the GR or FR register file are available,
   setting *HI_AVAILABLE and *LO_AVAILABLE accordingly.  */
static void
which_registers_available (
  SIM_CPU *current_cpu, int *hi_available, int *lo_available, int is_float
)
{
  if (is_float)
    frv_fr_registers_available (current_cpu, hi_available, lo_available);
  else
    frv_gr_registers_available (current_cpu, hi_available, lo_available);
}

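/* Clear the NE flag for TARGET_INDEX (or for all available registers when
   TARGET_INDEX is negative) and, when the NECR has exception logging
   enabled, the NESR/NEEAR pairs that refer to it.  */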
void
frvbf_clear_ne_flags (SIM_CPU *current_cpu, SI target_index, BI is_float)
{
  int hi_available;
  int lo_available;
  int exception;
  SI NE_base;
  USI necr;
  FRV_REGISTER_CONTROL *control;

  /* Check for availability of the target register(s).  */
  which_registers_available (current_cpu, & hi_available, & lo_available,
                             is_float);

  /* Check to make sure that the target register is available.  */
  if (! frv_check_register_access (current_cpu, target_index,
                                   hi_available, lo_available))
    return;

  /* Determine whether we're working with GR or FR registers.  */
  if (is_float)
    NE_base = H_SPR_FNER0;
  else
    NE_base = H_SPR_GNER0;

  /* Always clear the appropriate NE flags.  */
  clear_ne_flags (current_cpu, target_index, hi_available, lo_available,
                  NE_base);

  /* Clear the appropriate NESR and NEEAR registers.  */
  control = CPU_REGISTER_CONTROL (current_cpu);
  if (control->spr[H_SPR_NECR].implemented)
    {
      necr = GET_NECR ();
      if (GET_NECR_VALID (necr) && GET_NECR_ELOS (necr))
        clear_nesr_neear (current_cpu, target_index, is_float);
    }
}

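/* Commit the deferred state left behind by non-excepting loads: if an NE
   flag is set for the target register(s), the flags and any NESR/NEEAR pairs
   are cleared and, when the NECR has exception logging enabled, a
   commit_exception program interrupt is queued.  */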
void
frvbf_commit (SIM_CPU *current_cpu, SI target_index, BI is_float)
{
  SI NE_base;
  SI NE_flags[2];
  BI NE_flag;
  int exception;
  int hi_available;
  int lo_available;
  USI necr;
  FRV_REGISTER_CONTROL *control;

  /* Check for availability of the target register(s).  */
  which_registers_available (current_cpu, & hi_available, & lo_available,
                             is_float);

  /* Check to make sure that the target register is available.  */
  if (! frv_check_register_access (current_cpu, target_index,
                                   hi_available, lo_available))
    return;

  /* Determine whether we're working with GR or FR registers.  */
  if (is_float)
    NE_base = H_SPR_FNER0;
  else
    NE_base = H_SPR_GNER0;

  /* Determine whether a ne exception is pending.  */
  GET_NE_FLAGS (NE_flags, NE_base);
  if (target_index >= 0)
    NE_flag = GET_NE_FLAG (NE_flags, target_index);
  else
    {
      NE_flag =
        hi_available && NE_flags[0] != 0 || lo_available && NE_flags[1] != 0;
    }

  /* Always clear the appropriate NE flags.  */
  clear_ne_flags (current_cpu, target_index, hi_available, lo_available,
                  NE_base);

  control = CPU_REGISTER_CONTROL (current_cpu);
  if (control->spr[H_SPR_NECR].implemented)
    {
      necr = GET_NECR ();
      if (GET_NECR_VALID (necr) && GET_NECR_ELOS (necr) && NE_flag)
        {
          /* Clear the appropriate NESR and NEEAR registers.  */
          clear_nesr_neear (current_cpu, target_index, is_float);
          frv_queue_program_interrupt (current_cpu, FRV_COMMIT_EXCEPTION);
        }
    }
}

/* Generate the appropriate fp_exception(s) based on the given status code.  */
void
frvbf_fpu_error (CGEN_FPU* fpu, int status)
{
  struct frv_fp_exception_info fp_info = {
    FSR_NO_EXCEPTION, FTT_IEEE_754_EXCEPTION
  };

  if (status &
      (sim_fpu_status_invalid_snan |
       sim_fpu_status_invalid_qnan |
       sim_fpu_status_invalid_isi |
       sim_fpu_status_invalid_idi |
       sim_fpu_status_invalid_zdz |
       sim_fpu_status_invalid_imz |
       sim_fpu_status_invalid_cvi |
       sim_fpu_status_invalid_cmp |
       sim_fpu_status_invalid_sqrt))
    fp_info.fsr_mask |= FSR_INVALID_OPERATION;

  if (status & sim_fpu_status_invalid_div0)
    fp_info.fsr_mask |= FSR_DIVISION_BY_ZERO;

  if (status & sim_fpu_status_inexact)
    fp_info.fsr_mask |= FSR_INEXACT;

  if (status & sim_fpu_status_overflow)
    fp_info.fsr_mask |= FSR_OVERFLOW;

  if (status & sim_fpu_status_underflow)
    fp_info.fsr_mask |= FSR_UNDERFLOW;

  if (status & sim_fpu_status_denorm)
    {
      fp_info.fsr_mask |= FSR_DENORMAL_INPUT;
      fp_info.ftt = FTT_DENORMAL_INPUT;
    }

  if (fp_info.fsr_mask != FSR_NO_EXCEPTION)
    {
      SIM_CPU *current_cpu = (SIM_CPU *)fpu->owner;
      frv_queue_fp_exception_interrupt (current_cpu, & fp_info);
    }
}