sim/frv/interrupts.c
1 /* frv exception and interrupt support
2 Copyright (C) 1999-2021 Free Software Foundation, Inc.
3 Contributed by Red Hat.
4
5 This file is part of the GNU simulators.
6
7 This program is free software; you can redistribute it and/or modify
8 it under the terms of the GNU General Public License as published by
9 the Free Software Foundation; either version 3 of the License, or
10 (at your option) any later version.
11
12 This program is distributed in the hope that it will be useful,
13 but WITHOUT ANY WARRANTY; without even the implied warranty of
14 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
15 GNU General Public License for more details.
16
17 You should have received a copy of the GNU General Public License
18 along with this program. If not, see <http://www.gnu.org/licenses/>. */
19
20 /* This must come before any other includes. */
21 #include "defs.h"
22
23 #define WANT_CPU frvbf
24 #define WANT_CPU_FRVBF
25
26 #include "sim-main.h"
27 #include "sim-signal.h"
28 #include "bfd.h"
29 #include <stdlib.h>
30
31 /* FR-V Interrupt table.
32 Describes the interrupts supported by the FR-V.
33 This table *must* be maintained in order of interrupt priority as defined by
34 frv_interrupt_kind. */
35 #define DEFERRED 1
36 #define PRECISE 1
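/* Both flags are defined as 1 so that the table entries below can be
   written with !DEFERRED and !PRECISE (which evaluate to 0) and read as
   boolean "deferred"/"precise" attributes.  */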
37 #define ITABLE_ENTRY(name, class, deferral, precision, offset) \
38 {FRV_##name, FRV_EC_##name, class, deferral, precision, offset}
39
40 struct frv_interrupt frv_interrupt_table[NUM_FRV_INTERRUPT_KINDS] =
41 {
42 /* External interrupts */
43 ITABLE_ENTRY(INTERRUPT_LEVEL_1, FRV_EXTERNAL_INTERRUPT, !DEFERRED, !PRECISE, 0x21),
44 ITABLE_ENTRY(INTERRUPT_LEVEL_2, FRV_EXTERNAL_INTERRUPT, !DEFERRED, !PRECISE, 0x22),
45 ITABLE_ENTRY(INTERRUPT_LEVEL_3, FRV_EXTERNAL_INTERRUPT, !DEFERRED, !PRECISE, 0x23),
46 ITABLE_ENTRY(INTERRUPT_LEVEL_4, FRV_EXTERNAL_INTERRUPT, !DEFERRED, !PRECISE, 0x24),
47 ITABLE_ENTRY(INTERRUPT_LEVEL_5, FRV_EXTERNAL_INTERRUPT, !DEFERRED, !PRECISE, 0x25),
48 ITABLE_ENTRY(INTERRUPT_LEVEL_6, FRV_EXTERNAL_INTERRUPT, !DEFERRED, !PRECISE, 0x26),
49 ITABLE_ENTRY(INTERRUPT_LEVEL_7, FRV_EXTERNAL_INTERRUPT, !DEFERRED, !PRECISE, 0x27),
50 ITABLE_ENTRY(INTERRUPT_LEVEL_8, FRV_EXTERNAL_INTERRUPT, !DEFERRED, !PRECISE, 0x28),
51 ITABLE_ENTRY(INTERRUPT_LEVEL_9, FRV_EXTERNAL_INTERRUPT, !DEFERRED, !PRECISE, 0x29),
52 ITABLE_ENTRY(INTERRUPT_LEVEL_10, FRV_EXTERNAL_INTERRUPT, !DEFERRED, !PRECISE, 0x2a),
53 ITABLE_ENTRY(INTERRUPT_LEVEL_11, FRV_EXTERNAL_INTERRUPT, !DEFERRED, !PRECISE, 0x2b),
54 ITABLE_ENTRY(INTERRUPT_LEVEL_12, FRV_EXTERNAL_INTERRUPT, !DEFERRED, !PRECISE, 0x2c),
55 ITABLE_ENTRY(INTERRUPT_LEVEL_13, FRV_EXTERNAL_INTERRUPT, !DEFERRED, !PRECISE, 0x2d),
56 ITABLE_ENTRY(INTERRUPT_LEVEL_14, FRV_EXTERNAL_INTERRUPT, !DEFERRED, !PRECISE, 0x2e),
57 ITABLE_ENTRY(INTERRUPT_LEVEL_15, FRV_EXTERNAL_INTERRUPT, !DEFERRED, !PRECISE, 0x2f),
58 /* Software interrupt */
59 ITABLE_ENTRY(TRAP_INSTRUCTION, FRV_SOFTWARE_INTERRUPT, !DEFERRED, !PRECISE, 0x80),
60 /* Program interrupts */
61 ITABLE_ENTRY(COMMIT_EXCEPTION, FRV_PROGRAM_INTERRUPT, !DEFERRED, !PRECISE, 0x19),
62 ITABLE_ENTRY(DIVISION_EXCEPTION, FRV_PROGRAM_INTERRUPT, !DEFERRED, !PRECISE, 0x17),
63 ITABLE_ENTRY(DATA_STORE_ERROR, FRV_PROGRAM_INTERRUPT, !DEFERRED, !PRECISE, 0x14),
64 ITABLE_ENTRY(DATA_ACCESS_EXCEPTION, FRV_PROGRAM_INTERRUPT, !DEFERRED, !PRECISE, 0x13),
65 ITABLE_ENTRY(DATA_ACCESS_MMU_MISS, FRV_PROGRAM_INTERRUPT, !DEFERRED, !PRECISE, 0x12),
66 ITABLE_ENTRY(DATA_ACCESS_ERROR, FRV_PROGRAM_INTERRUPT, !DEFERRED, !PRECISE, 0x11),
67 ITABLE_ENTRY(MP_EXCEPTION, FRV_PROGRAM_INTERRUPT, !DEFERRED, !PRECISE, 0x0e),
68 ITABLE_ENTRY(FP_EXCEPTION, FRV_PROGRAM_INTERRUPT, !DEFERRED, !PRECISE, 0x0d),
69 ITABLE_ENTRY(MEM_ADDRESS_NOT_ALIGNED, FRV_PROGRAM_INTERRUPT, !DEFERRED, !PRECISE, 0x10),
70 ITABLE_ENTRY(REGISTER_EXCEPTION, FRV_PROGRAM_INTERRUPT, !DEFERRED, PRECISE, 0x08),
71 ITABLE_ENTRY(MP_DISABLED, FRV_PROGRAM_INTERRUPT, !DEFERRED, PRECISE, 0x0b),
72 ITABLE_ENTRY(FP_DISABLED, FRV_PROGRAM_INTERRUPT, !DEFERRED, PRECISE, 0x0a),
73 ITABLE_ENTRY(PRIVILEGED_INSTRUCTION, FRV_PROGRAM_INTERRUPT, !DEFERRED, PRECISE, 0x06),
74 ITABLE_ENTRY(ILLEGAL_INSTRUCTION, FRV_PROGRAM_INTERRUPT, !DEFERRED, PRECISE, 0x07),
75 ITABLE_ENTRY(INSTRUCTION_ACCESS_EXCEPTION, FRV_PROGRAM_INTERRUPT, !DEFERRED, PRECISE, 0x03),
76 ITABLE_ENTRY(INSTRUCTION_ACCESS_ERROR, FRV_PROGRAM_INTERRUPT, !DEFERRED, PRECISE, 0x02),
77 ITABLE_ENTRY(INSTRUCTION_ACCESS_MMU_MISS, FRV_PROGRAM_INTERRUPT, !DEFERRED, PRECISE, 0x01),
78 ITABLE_ENTRY(COMPOUND_EXCEPTION, FRV_PROGRAM_INTERRUPT, !DEFERRED, !PRECISE, 0x20),
79 /* Break interrupt */
80 ITABLE_ENTRY(BREAK_EXCEPTION, FRV_BREAK_INTERRUPT, !DEFERRED, !PRECISE, 0xff),
81 /* Reset interrupt */
82 ITABLE_ENTRY(RESET, FRV_RESET_INTERRUPT, !DEFERRED, !PRECISE, 0x00)
83 };
84
85 /* The current interrupt state. */
86 struct frv_interrupt_state frv_interrupt_state;
87
88 /* Maintain the address of the start of the previous VLIW insn sequence. */
89 IADDR previous_vliw_pc;
90
91 /* Add a break interrupt to the interrupt queue. */
92 struct frv_interrupt_queue_element *
93 frv_queue_break_interrupt (SIM_CPU *current_cpu)
94 {
95 return frv_queue_interrupt (current_cpu, FRV_BREAK_EXCEPTION);
96 }
97
98 /* Add a software interrupt to the interrupt queue. */
99 struct frv_interrupt_queue_element *
100 frv_queue_software_interrupt (SIM_CPU *current_cpu, SI offset)
101 {
102 struct frv_interrupt_queue_element *new_element
103 = frv_queue_interrupt (current_cpu, FRV_TRAP_INSTRUCTION);
104
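  /* Note: the handler offset is recorded in the shared frv_interrupt_table
     entry for this kind, so the most recently queued trap instruction
     determines the offset used when the interrupt is eventually handled.  */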
105 struct frv_interrupt *interrupt = & frv_interrupt_table[new_element->kind];
106 interrupt->handler_offset = offset;
107
108 return new_element;
109 }
110
111 /* Add a program interrupt to the interrupt queue. */
112 struct frv_interrupt_queue_element *
113 frv_queue_program_interrupt (
114 SIM_CPU *current_cpu, enum frv_interrupt_kind kind
115 )
116 {
117 return frv_queue_interrupt (current_cpu, kind);
118 }
119
120 /* Add an external interrupt to the interrupt queue. */
121 struct frv_interrupt_queue_element *
122 frv_queue_external_interrupt (
123 SIM_CPU *current_cpu, enum frv_interrupt_kind kind
124 )
125 {
126 if (! GET_H_PSR_ET ()
127 || (kind != FRV_INTERRUPT_LEVEL_15 && kind < GET_H_PSR_PIL ()))
128 return NULL; /* Leave it for later. */
129
130 return frv_queue_interrupt (current_cpu, kind);
131 }
132
133 /* Add any interrupt to the interrupt queue. It will be added in reverse
134 priority order. This makes it easy to find the highest priority interrupt
135 at the end of the queue and to remove it after processing. */
136 struct frv_interrupt_queue_element *
137 frv_queue_interrupt (SIM_CPU *current_cpu, enum frv_interrupt_kind kind)
138 {
139 int i;
140 int j;
141 int limit = frv_interrupt_state.queue_index;
142 struct frv_interrupt_queue_element *new_element;
143 enum frv_interrupt_class iclass;
144
145 if (limit >= FRV_INTERRUPT_QUEUE_SIZE)
146 abort (); /* TODO: Make the queue dynamic */
147
148 /* Find the right place in the queue. */
149 for (i = 0; i < limit; ++i)
150 {
151 if (frv_interrupt_state.queue[i].kind >= kind)
152 break;
153 }
154
155 /* Don't queue two external interrupts of the same priority. */
156 iclass = frv_interrupt_table[kind].iclass;
157 if (i < limit && iclass == FRV_EXTERNAL_INTERRUPT)
158 {
159 if (frv_interrupt_state.queue[i].kind == kind)
160 return & frv_interrupt_state.queue[i];
161 }
162
163 /* Make room for the new interrupt in this spot. */
164 for (j = limit - 1; j >= i; --j)
165 frv_interrupt_state.queue[j + 1] = frv_interrupt_state.queue[j];
166
167 /* Add the new interrupt. */
168 frv_interrupt_state.queue_index++;
169 new_element = & frv_interrupt_state.queue[i];
170 new_element->kind = kind;
171 new_element->vpc = CPU_PC_GET (current_cpu);
172 new_element->u.data_written.length = 0;
173 frv_set_interrupt_queue_slot (current_cpu, new_element);
174
175 return new_element;
176 }
177
178 struct frv_interrupt_queue_element *
179 frv_queue_register_exception_interrupt (SIM_CPU *current_cpu, enum frv_rec rec)
180 {
181 struct frv_interrupt_queue_element *new_element =
182 frv_queue_program_interrupt (current_cpu, FRV_REGISTER_EXCEPTION);
183
184 new_element->u.rec = rec;
185
186 return new_element;
187 }
188
189 struct frv_interrupt_queue_element *
190 frv_queue_mem_address_not_aligned_interrupt (SIM_CPU *current_cpu, USI addr)
191 {
192 struct frv_interrupt_queue_element *new_element;
193 USI isr = GET_ISR ();
194
195 /* Make sure that this exception is not masked. */
196 if (GET_ISR_EMAM (isr))
197 return NULL;
198
199 /* Queue the interrupt. */
200 new_element = frv_queue_program_interrupt (current_cpu,
201 FRV_MEM_ADDRESS_NOT_ALIGNED);
202 new_element->eaddress = addr;
203 new_element->u.data_written = frv_interrupt_state.data_written;
204 frv_interrupt_state.data_written.length = 0;
205
206 return new_element;
207 }
208
209 struct frv_interrupt_queue_element *
210 frv_queue_data_access_error_interrupt (SIM_CPU *current_cpu, USI addr)
211 {
212 struct frv_interrupt_queue_element *new_element;
213 new_element = frv_queue_program_interrupt (current_cpu,
214 FRV_DATA_ACCESS_ERROR);
215 new_element->eaddress = addr;
216 return new_element;
217 }
218
219 struct frv_interrupt_queue_element *
220 frv_queue_data_access_exception_interrupt (SIM_CPU *current_cpu)
221 {
222 return frv_queue_program_interrupt (current_cpu, FRV_DATA_ACCESS_EXCEPTION);
223 }
224
225 struct frv_interrupt_queue_element *
226 frv_queue_instruction_access_error_interrupt (SIM_CPU *current_cpu)
227 {
228 return frv_queue_program_interrupt (current_cpu, FRV_INSTRUCTION_ACCESS_ERROR);
229 }
230
231 struct frv_interrupt_queue_element *
232 frv_queue_instruction_access_exception_interrupt (SIM_CPU *current_cpu)
233 {
234 return frv_queue_program_interrupt (current_cpu, FRV_INSTRUCTION_ACCESS_EXCEPTION);
235 }
236
237 struct frv_interrupt_queue_element *
238 frv_queue_illegal_instruction_interrupt (
239 SIM_CPU *current_cpu, const CGEN_INSN *insn
240 )
241 {
242 SIM_DESC sd = CPU_STATE (current_cpu);
243 switch (STATE_ARCHITECTURE (sd)->mach)
244 {
245 case bfd_mach_fr400:
246 case bfd_mach_fr450:
247 case bfd_mach_fr550:
248 break;
249 default:
250 /* Some machines generate fp_exception for this case. */
251 if (frv_is_float_insn (insn) || frv_is_media_insn (insn))
252 {
253 struct frv_fp_exception_info fp_info = {
254 FSR_NO_EXCEPTION, FTT_SEQUENCE_ERROR
255 };
256 return frv_queue_fp_exception_interrupt (current_cpu, & fp_info);
257 }
258 break;
259 }
260
261 return frv_queue_program_interrupt (current_cpu, FRV_ILLEGAL_INSTRUCTION);
262 }
263
264 struct frv_interrupt_queue_element *
265 frv_queue_privileged_instruction_interrupt (SIM_CPU *current_cpu, const CGEN_INSN *insn)
266 {
267 /* The fr550 has no privileged instruction interrupt. It uses
268 illegal_instruction. */
269 SIM_DESC sd = CPU_STATE (current_cpu);
270 if (STATE_ARCHITECTURE (sd)->mach == bfd_mach_fr550)
271 return frv_queue_program_interrupt (current_cpu, FRV_ILLEGAL_INSTRUCTION);
272
273 return frv_queue_program_interrupt (current_cpu, FRV_PRIVILEGED_INSTRUCTION);
274 }
275
276 struct frv_interrupt_queue_element *
277 frv_queue_float_disabled_interrupt (SIM_CPU *current_cpu)
278 {
279 /* The fr550 has no fp_disabled interrupt. It uses illegal_instruction. */
280 SIM_DESC sd = CPU_STATE (current_cpu);
281 if (STATE_ARCHITECTURE (sd)->mach == bfd_mach_fr550)
282 return frv_queue_program_interrupt (current_cpu, FRV_ILLEGAL_INSTRUCTION);
283
284 return frv_queue_program_interrupt (current_cpu, FRV_FP_DISABLED);
285 }
286
287 struct frv_interrupt_queue_element *
288 frv_queue_media_disabled_interrupt (SIM_CPU *current_cpu)
289 {
290 /* The fr550 has no mp_disabled interrupt. It uses illegal_instruction. */
291 SIM_DESC sd = CPU_STATE (current_cpu);
292 if (STATE_ARCHITECTURE (sd)->mach == bfd_mach_fr550)
293 return frv_queue_program_interrupt (current_cpu, FRV_ILLEGAL_INSTRUCTION);
294
295 return frv_queue_program_interrupt (current_cpu, FRV_MP_DISABLED);
296 }
297
298 struct frv_interrupt_queue_element *
299 frv_queue_non_implemented_instruction_interrupt (
300 SIM_CPU *current_cpu, const CGEN_INSN *insn
301 )
302 {
303 SIM_DESC sd = CPU_STATE (current_cpu);
304 switch (STATE_ARCHITECTURE (sd)->mach)
305 {
306 case bfd_mach_fr400:
307 case bfd_mach_fr450:
308 case bfd_mach_fr550:
309 break;
310 default:
311 /* Some machines generate fp_exception or mp_exception for this case. */
312 if (frv_is_float_insn (insn))
313 {
314 struct frv_fp_exception_info fp_info = {
315 FSR_NO_EXCEPTION, FTT_UNIMPLEMENTED_FPOP
316 };
317 return frv_queue_fp_exception_interrupt (current_cpu, & fp_info);
318 }
319 if (frv_is_media_insn (insn))
320 {
321 frv_set_mp_exception_registers (current_cpu, MTT_UNIMPLEMENTED_MPOP,
322 0);
323 return NULL; /* no interrupt queued at this time. */
324 }
325 break;
326 }
327
328 return frv_queue_program_interrupt (current_cpu, FRV_ILLEGAL_INSTRUCTION);
329 }
330
331 /* Queue the given fp_exception interrupt. Also update fp_info by removing
332 masked interrupts and updating the 'slot' field. */
333 struct frv_interrupt_queue_element *
334 frv_queue_fp_exception_interrupt (
335 SIM_CPU *current_cpu, struct frv_fp_exception_info *fp_info
336 )
337 {
338 SI fsr0 = GET_FSR (0);
339 int tem = GET_FSR_TEM (fsr0);
340 int aexc = GET_FSR_AEXC (fsr0);
341 struct frv_interrupt_queue_element *new_element = NULL;
342
343 /* Update AEXC with the interrupts that are masked. */
344 aexc |= fp_info->fsr_mask & ~tem;
345 SET_FSR_AEXC (fsr0, aexc);
346 SET_FSR (0, fsr0);
347
348 /* update fsr_mask with the exceptions that are enabled. */
349 fp_info->fsr_mask &= tem;
350
351 /* If there is an unmasked interrupt then queue it, unless
352 this was a non-excepting insn, in which case simply set the NE
353 status registers. */
354 if (frv_interrupt_state.ne_index != NE_NOFLAG
355 && fp_info->fsr_mask != FSR_NO_EXCEPTION)
356 {
357 SET_NE_FLAG (frv_interrupt_state.f_ne_flags,
358 frv_interrupt_state.ne_index);
359 /* TODO -- Set NESR for chips which support it. */
360 new_element = NULL;
361 }
362 else if (fp_info->fsr_mask != FSR_NO_EXCEPTION
363 || fp_info->ftt == FTT_UNIMPLEMENTED_FPOP
364 || fp_info->ftt == FTT_SEQUENCE_ERROR
365 || fp_info->ftt == FTT_INVALID_FR)
366 {
367 new_element = frv_queue_program_interrupt (current_cpu, FRV_FP_EXCEPTION);
368 new_element->u.fp_info = *fp_info;
369 }
370
371 return new_element;
372 }
373
374 struct frv_interrupt_queue_element *
375 frv_queue_division_exception_interrupt (SIM_CPU *current_cpu, enum frv_dtt dtt)
376 {
377 struct frv_interrupt_queue_element *new_element =
378 frv_queue_program_interrupt (current_cpu, FRV_DIVISION_EXCEPTION);
379
380 new_element->u.dtt = dtt;
381
382 return new_element;
383 }
384
385 /* Check for interrupts caused by illegal insn access. These conditions are
386 checked in the order specified by the fr400 and fr500 LSI specs. */
387 void
388 frv_detect_insn_access_interrupts (SIM_CPU *current_cpu, SCACHE *sc)
389 {
390
391 const CGEN_INSN *insn = sc->argbuf.idesc->idata;
392 SIM_DESC sd = CPU_STATE (current_cpu);
393 FRV_VLIW *vliw = CPU_VLIW (current_cpu);
394
395 /* Check for vliw constraints. */
396 if (vliw->constraint_violation)
397 frv_queue_illegal_instruction_interrupt (current_cpu, insn);
398 /* Check for non-excepting insns. */
399 else if (CGEN_INSN_ATTR_VALUE (insn, CGEN_INSN_NON_EXCEPTING)
400 && ! GET_H_PSR_NEM ())
401 frv_queue_non_implemented_instruction_interrupt (current_cpu, insn);
402 /* Check for conditional insns. */
403 else if (CGEN_INSN_ATTR_VALUE (insn, CGEN_INSN_CONDITIONAL)
404 && ! GET_H_PSR_CM ())
405 frv_queue_non_implemented_instruction_interrupt (current_cpu, insn);
406 /* Make sure floating point support is enabled. */
407 else if (! GET_H_PSR_EF ())
408 {
409 /* Generate fp_disabled if it is a floating point insn or if PSR.EM is
410 off and the insn accesses an fp register. */
411 if (frv_is_float_insn (insn)
412 || (CGEN_INSN_ATTR_VALUE (insn, CGEN_INSN_FR_ACCESS)
413 && ! GET_H_PSR_EM ()))
414 frv_queue_float_disabled_interrupt (current_cpu);
415 }
416 /* Make sure media support is enabled. */
417 else if (! GET_H_PSR_EM ())
418 {
419 /* Generate mp_disabled if it is a media insn. */
420 if (frv_is_media_insn (insn) || CGEN_INSN_NUM (insn) == FRV_INSN_MTRAP)
421 frv_queue_media_disabled_interrupt (current_cpu);
422 }
423 /* Check for privileged insns. */
424 else if (CGEN_INSN_ATTR_VALUE (insn, CGEN_INSN_PRIVILEGED) &&
425 ! GET_H_PSR_S ())
426 frv_queue_privileged_instruction_interrupt (current_cpu, insn);
427 #if 0 /* disable for now until we find out how FSR0.QNE gets reset. */
428 else
429 {
430 /* Enter the halt state if FSR0.QNE is set and we are executing a
431 floating point insn, a media insn or an insn which accesses an FR
432 register. */
433 SI fsr0 = GET_FSR (0);
434 if (GET_FSR_QNE (fsr0)
435 && (frv_is_float_insn (insn) || frv_is_media_insn (insn)
436 || CGEN_INSN_ATTR_VALUE (insn, CGEN_INSN_FR_ACCESS)))
437 {
438 sim_engine_halt (sd, current_cpu, NULL, GET_H_PC (), sim_stopped,
439 SIM_SIGINT);
440 }
441 }
442 #endif
443 }
444
445 /* Record the current VLIW slot in the given interrupt queue element. */
446 void
447 frv_set_interrupt_queue_slot (
448 SIM_CPU *current_cpu, struct frv_interrupt_queue_element *item
449 )
450 {
451 FRV_VLIW *vliw = CPU_VLIW (current_cpu);
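  /* next_slot indexes the slot that will be filled next, so next_slot - 1
     presumably refers to the insn most recently added to the current VLIW
     packet, i.e. the one being issued when this interrupt was queued.  */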
452 int slot = vliw->next_slot - 1;
453 item->slot = (*vliw->current_vliw)[slot];
454 }
455
456 /* Handle an individual interrupt. */
457 static void
458 handle_interrupt (SIM_CPU *current_cpu, IADDR pc)
459 {
460 struct frv_interrupt *interrupt;
461 int writeback_done = 0;
462 while (1)
463 {
464 /* Interrupts are queued in priority order with the highest priority
465 last. */
466 int index = frv_interrupt_state.queue_index - 1;
467 struct frv_interrupt_queue_element *item
468 = & frv_interrupt_state.queue[index];
469 interrupt = & frv_interrupt_table[item->kind];
470
471 switch (interrupt->iclass)
472 {
473 case FRV_EXTERNAL_INTERRUPT:
474 /* Perform writeback first. This may cause a higher priority
475 interrupt. */
476 if (! writeback_done)
477 {
478 frvbf_perform_writeback (current_cpu);
479 writeback_done = 1;
480 continue;
481 }
482 frv_external_interrupt (current_cpu, item, pc);
483 return;
484 case FRV_SOFTWARE_INTERRUPT:
485 frv_interrupt_state.queue_index = index;
486 frv_software_interrupt (current_cpu, item, pc);
487 return;
488 case FRV_PROGRAM_INTERRUPT:
489 /* If the program interrupt is not strict (imprecise), then perform
490 writeback first. This may, in turn, cause a higher priority
491 interrupt. */
492 if (! interrupt->precise && ! writeback_done)
493 {
494 frv_interrupt_state.imprecise_interrupt = item;
495 frvbf_perform_writeback (current_cpu);
496 writeback_done = 1;
497 continue;
498 }
499 frv_interrupt_state.queue_index = index;
500 frv_program_interrupt (current_cpu, item, pc);
501 return;
502 case FRV_BREAK_INTERRUPT:
503 frv_interrupt_state.queue_index = index;
504 frv_break_interrupt (current_cpu, interrupt, pc);
505 return;
506 case FRV_RESET_INTERRUPT:
507 break;
508 default:
509 break;
510 }
511 frv_interrupt_state.queue_index = index;
512 break; /* out of loop. */
513 }
514
515 /* We should never get here. */
516 {
517 SIM_DESC sd = CPU_STATE (current_cpu);
518 sim_engine_abort (sd, current_cpu, pc,
519 "interrupt class not supported %d\n",
520 interrupt->iclass);
521 }
522 }
523
524 /* Check to see if the RSTR.HR or RSTR.SR bits have been set. If so, handle
525 the appropriate reset interrupt. */
526 static int
527 check_reset (SIM_CPU *current_cpu, IADDR pc)
528 {
529 int hsr0;
530 int hr;
531 int sr;
532 SI rstr;
533 FRV_CACHE *cache = CPU_DATA_CACHE (current_cpu);
534 IADDR address = RSTR_ADDRESS;
535
536 /* We don't want this to show up in the cache statistics, so read the
537 cache passively. */
538 if (! frv_cache_read_passive_SI (cache, address, & rstr))
539 rstr = sim_core_read_unaligned_4 (current_cpu, pc, read_map, address);
540
541 hr = GET_RSTR_HR (rstr);
542 sr = GET_RSTR_SR (rstr);
543
544 if (! hr && ! sr)
545 return 0; /* no reset. */
546
547 /* Reinitialize the machine state. */
548 if (hr)
549 frv_hardware_reset (current_cpu);
550 else
551 frv_software_reset (current_cpu);
552
553 /* Branch to the reset address. */
554 hsr0 = GET_HSR0 ();
555 if (GET_HSR0_SA (hsr0))
556 SET_H_PC (0xff000000);
557 else
558 SET_H_PC (0);
559
560 return 1; /* reset */
561 }
562
563 /* Process any pending interrupt(s) after a group of parallel insns. */
564 void
565 frv_process_interrupts (SIM_CPU *current_cpu)
566 {
567 SI NE_flags[2];
568 /* Need to save the pc here because writeback may change it (due to a
569 branch). */
570 IADDR pc = CPU_PC_GET (current_cpu);
571
572 /* Check for a reset before anything else. */
573 if (check_reset (current_cpu, pc))
574 return;
575
576 /* First queue the writes for any accumulated NE flags. */
577 if (frv_interrupt_state.f_ne_flags[0] != 0
578 || frv_interrupt_state.f_ne_flags[1] != 0)
579 {
580 GET_NE_FLAGS (NE_flags, H_SPR_FNER0);
581 NE_flags[0] |= frv_interrupt_state.f_ne_flags[0];
582 NE_flags[1] |= frv_interrupt_state.f_ne_flags[1];
583 SET_NE_FLAGS (H_SPR_FNER0, NE_flags);
584 }
585
586 /* If there is no interrupt pending, then perform parallel writeback. This
587 may cause an interrupt. */
588 if (frv_interrupt_state.queue_index <= 0)
589 frvbf_perform_writeback (current_cpu);
590
591 /* If there is an interrupt pending, then process it. */
592 if (frv_interrupt_state.queue_index > 0)
593 handle_interrupt (current_cpu, pc);
594 }
595
596 /* Find the next available ESR and return its index */
597 static int
598 esr_for_data_access_exception (
599 SIM_CPU *current_cpu, struct frv_interrupt_queue_element *item
600 )
601 {
602 SIM_DESC sd = CPU_STATE (current_cpu);
603 if (STATE_ARCHITECTURE (sd)->mach == bfd_mach_fr550)
604 return 8; /* Use ESR8, EPCR8. */
605
606 if (item->slot == UNIT_I0)
607 return 8; /* Use ESR8, EPCR8, EAR8, EDR8. */
608
609 return 9; /* Use ESR9, EPCR9, EAR9. */
610 }
611
612 /* Set the next available EDR register with the data which was to be stored
613 and return the index of the register. */
614 static int
615 set_edr_register (
616 SIM_CPU *current_cpu, struct frv_interrupt_queue_element *item, int edr_index
617 )
618 {
619 /* EDR0, EDR4 and EDR8 are available as blocks of 4.
620 SI data uses EDR3, EDR7 and EDR11
621 DI data uses EDR2, EDR6 and EDR10
622 XI data uses EDR0, EDR4 and EDR8. */
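   /* Worked example: for an SI store, data_written.length is 1, so an
      edr_index of 0 becomes 0 + (4 - 1) = 3 and the single word is
      written to EDR3, matching the layout described above.  */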
623 int i;
624 edr_index += 4 - item->u.data_written.length;
625 for (i = 0; i < item->u.data_written.length; ++i)
626 SET_EDR (edr_index + i, item->u.data_written.words[i]);
627
628 return edr_index;
629 }
630
631 /* Clear ESFR0, EPCRx, ESRx, EARx and EDRx. */
632 static void
633 clear_exception_status_registers (SIM_CPU *current_cpu)
634 {
635 int i;
636 /* It is only necessary to clear the flag bits indicating which registers
637 are valid. */
638 SET_ESFR (0, 0);
639 SET_ESFR (1, 0);
640
641 for (i = 0; i <= 2; ++i)
642 {
643 SI esr = GET_ESR (i);
644 CLEAR_ESR_VALID (esr);
645 SET_ESR (i, esr);
646 }
647 for (i = 8; i <= 15; ++i)
648 {
649 SI esr = GET_ESR (i);
650 CLEAR_ESR_VALID (esr);
651 SET_ESR (i, esr);
652 }
653 }
654
655 /* Record state for media exception. */
656 void
657 frv_set_mp_exception_registers (
658 SIM_CPU *current_cpu, enum frv_msr_mtt mtt, int sie
659 )
660 {
661 /* Record the interrupt factor in MSR0. */
662 SI msr0 = GET_MSR (0);
663 if (GET_MSR_MTT (msr0) == MTT_NONE)
664 SET_MSR_MTT (msr0, mtt);
665
666 /* Also set the OVF bit in the appropriate MSR as well as MSR0.AOVF. */
667 if (mtt == MTT_OVERFLOW)
668 {
669 FRV_VLIW *vliw = CPU_VLIW (current_cpu);
670 int slot = vliw->next_slot - 1;
671 SIM_DESC sd = CPU_STATE (current_cpu);
672
673 /* If this insn is in the M2 slot, then set MSR1.OVF and MSR1.SIE,
674 otherwise set MSR0.OVF and MSR0.SIE. */
675 if (STATE_ARCHITECTURE (sd)->mach != bfd_mach_fr550 && (*vliw->current_vliw)[slot] == UNIT_FM1)
676 {
677 SI msr = GET_MSR (1);
678 OR_MSR_SIE (msr, sie);
679 SET_MSR_OVF (msr);
680 SET_MSR (1, msr);
681 }
682 else
683 {
684 OR_MSR_SIE (msr0, sie);
685 SET_MSR_OVF (msr0);
686 }
687
688 /* Generate the interrupt now if MSR0.MPEM is set on fr550 */
689 if (STATE_ARCHITECTURE (sd)->mach == bfd_mach_fr550 && GET_MSR_MPEM (msr0))
690 frv_queue_program_interrupt (current_cpu, FRV_MP_EXCEPTION);
691 else
692 {
693 /* Regardless of the slot, set MSR0.AOVF. */
694 SET_MSR_AOVF (msr0);
695 }
696 }
697
698 SET_MSR (0, msr0);
699 }
700
701 /* Determine the correct FQ register to use for the given exception.
702 Return -1 if a register is not available. */
703 static int
704 fq_for_exception (
705 SIM_CPU *current_cpu, struct frv_interrupt_queue_element *item
706 )
707 {
708 SI fq;
709 struct frv_fp_exception_info *fp_info = & item->u.fp_info;
710
711 /* For fp_exception overflow, underflow or inexact, use FQ0 or FQ1. */
712 if (fp_info->ftt == FTT_IEEE_754_EXCEPTION
713 && (fp_info->fsr_mask & (FSR_OVERFLOW | FSR_UNDERFLOW | FSR_INEXACT)))
714 {
715 fq = GET_FQ (0);
716 if (! GET_FQ_VALID (fq))
717 return 0; /* FQ0 is available. */
718 fq = GET_FQ (1);
719 if (! GET_FQ_VALID (fq))
720 return 1; /* FQ1 is available. */
721
722 /* No FQ register is available */
723 {
724 SIM_DESC sd = CPU_STATE (current_cpu);
725 IADDR pc = CPU_PC_GET (current_cpu);
726 sim_engine_abort (sd, current_cpu, pc, "No FQ register available\n");
727 }
728 return -1;
729 }
730 /* For other exceptions, use FQ2 if the insn was in slot F0/I0 and FQ3
731 otherwise. */
732 if (item->slot == UNIT_FM0 || item->slot == UNIT_I0)
733 return 2;
734
735 return 3;
736 }
737
738 /* Set FSR0, FQ0-FQ9, depending on the interrupt. */
739 static void
740 set_fp_exception_registers (
741 SIM_CPU *current_cpu, struct frv_interrupt_queue_element *item
742 )
743 {
744 int fq_index;
745 SI fq;
746 SI insn;
747 SI fsr0;
748 IADDR pc;
749 struct frv_fp_exception_info *fp_info;
750 SIM_DESC sd = CPU_STATE (current_cpu);
751
752 /* No FQ registers on fr550 */
753 if (STATE_ARCHITECTURE (sd)->mach == bfd_mach_fr550)
754 {
755 /* Update the fsr. */
756 fp_info = & item->u.fp_info;
757 fsr0 = GET_FSR (0);
758 SET_FSR_FTT (fsr0, fp_info->ftt);
759 SET_FSR (0, fsr0);
760 return;
761 }
762
763 /* Select an FQ and update it with the exception information. */
764 fq_index = fq_for_exception (current_cpu, item);
765 if (fq_index == -1)
766 return;
767
768 fp_info = & item->u.fp_info;
769 fq = GET_FQ (fq_index);
770 SET_FQ_MIV (fq, MIV_FLOAT);
771 SET_FQ_SIE (fq, SIE_NIL);
772 SET_FQ_FTT (fq, fp_info->ftt);
773 SET_FQ_CEXC (fq, fp_info->fsr_mask);
774 SET_FQ_VALID (fq);
775 SET_FQ (fq_index, fq);
776
777 /* Write the failing insn into FQx.OPC. */
778 pc = item->vpc;
779 insn = GETMEMSI (current_cpu, pc, pc);
780 SET_FQ_OPC (fq_index, insn);
781
782 /* Update the fsr. */
783 fsr0 = GET_FSR (0);
784 SET_FSR_QNE (fsr0); /* FQ not empty */
785 SET_FSR_FTT (fsr0, fp_info->ftt);
786 SET_FSR (0, fsr0);
787 }
788
789 /* Record the state of a division exception in the ISR. */
790 static void
791 set_isr_exception_fields (
792 SIM_CPU *current_cpu, struct frv_interrupt_queue_element *item
793 )
794 {
795 USI isr = GET_ISR ();
796 int dtt = GET_ISR_DTT (isr);
797 dtt |= item->u.dtt;
798 SET_ISR_DTT (isr, dtt);
799 SET_ISR (isr);
800 }
801
802 /* Set ESFR0, EPCRx, ESRx, EARx and EDRx, according to the given program
803 interrupt. */
804 static void
805 set_exception_status_registers (
806 SIM_CPU *current_cpu, struct frv_interrupt_queue_element *item
807 )
808 {
809 struct frv_interrupt *interrupt = & frv_interrupt_table[item->kind];
810 int slot = (item->vpc - previous_vliw_pc) / 4;
811 int reg_index = -1;
812 int set_ear = 0;
813 int set_edr = 0;
814 int set_daec = 0;
815 int set_epcr = 0;
816 SI esr = 0;
817 SIM_DESC sd = CPU_STATE (current_cpu);
818
819 /* If the interrupt is strict (precise) or the interrupt is on an insn
820 in the I0 pipe, then set the 0 registers. */
821 if (interrupt->precise)
822 {
823 reg_index = 0;
824 if (interrupt->kind == FRV_REGISTER_EXCEPTION)
825 SET_ESR_REC (esr, item->u.rec);
826 else if (interrupt->kind == FRV_INSTRUCTION_ACCESS_EXCEPTION)
827 SET_ESR_IAEC (esr, item->u.iaec);
828 /* For fr550, don't set epcr for precise interrupts. */
829 if (STATE_ARCHITECTURE (sd)->mach != bfd_mach_fr550)
830 set_epcr = 1;
831 }
832 else
833 {
834 switch (interrupt->kind)
835 {
836 case FRV_DIVISION_EXCEPTION:
837 set_isr_exception_fields (current_cpu, item);
838 /* fall thru to set reg_index. */
839 case FRV_COMMIT_EXCEPTION:
840 /* For fr550, always use ESR0. */
841 if (STATE_ARCHITECTURE (sd)->mach == bfd_mach_fr550)
842 reg_index = 0;
843 else if (item->slot == UNIT_I0)
844 reg_index = 0;
845 else if (item->slot == UNIT_I1)
846 reg_index = 1;
847 set_epcr = 1;
848 break;
849 case FRV_DATA_STORE_ERROR:
850 reg_index = 14; /* Use ESR14. */
851 break;
852 case FRV_DATA_ACCESS_ERROR:
853 reg_index = 15; /* Use ESR15, EPCR15. */
854 set_ear = 1;
855 break;
856 case FRV_DATA_ACCESS_EXCEPTION:
857 set_daec = 1;
858 /* fall through */
859 case FRV_DATA_ACCESS_MMU_MISS:
860 case FRV_MEM_ADDRESS_NOT_ALIGNED:
861 /* Get the appropriate ESR, EPCR, EAR and EDR.
862 EAR will be set. EDR will not be set unless this is a store insn. */
863 set_ear = 1;
864 /* For fr550, never use EDRx. */
865 if (STATE_ARCHITECTURE (sd)->mach != bfd_mach_fr550)
866 if (item->u.data_written.length != 0)
867 set_edr = 1;
868 reg_index = esr_for_data_access_exception (current_cpu, item);
869 set_epcr = 1;
870 break;
871 case FRV_MP_EXCEPTION:
872 /* For fr550, use EPCR2 and ESR2. */
873 if (STATE_ARCHITECTURE (sd)->mach == bfd_mach_fr550)
874 {
875 reg_index = 2;
876 set_epcr = 1;
877 }
878 break; /* MSR0-1, FQ0-9 are already set. */
879 case FRV_FP_EXCEPTION:
880 set_fp_exception_registers (current_cpu, item);
881 /* For fr550, use EPCR2 and ESR2. */
882 if (STATE_ARCHITECTURE (sd)->mach == bfd_mach_fr550)
883 {
884 reg_index = 2;
885 set_epcr = 1;
886 }
887 break;
888 default:
889 {
890 SIM_DESC sd = CPU_STATE (current_cpu);
891 IADDR pc = CPU_PC_GET (current_cpu);
892 sim_engine_abort (sd, current_cpu, pc,
893 "invalid non-strict program interrupt kind: %d\n",
894 interrupt->kind);
895 break;
896 }
897 }
898 } /* non-strict (imprecise) interrupt */
899
900 /* Now fill in the selected exception status registers. */
901 if (reg_index != -1)
902 {
903 /* Now set the exception status registers. */
904 SET_ESFR_FLAG (reg_index);
905 SET_ESR_EC (esr, interrupt->ec);
906
907 if (set_epcr)
908 {
909 if (STATE_ARCHITECTURE (sd)->mach == bfd_mach_fr400)
910 SET_EPCR (reg_index, previous_vliw_pc);
911 else
912 SET_EPCR (reg_index, item->vpc);
913 }
914
915 if (set_ear)
916 {
917 SET_EAR (reg_index, item->eaddress);
918 SET_ESR_EAV (esr);
919 }
920 else
921 CLEAR_ESR_EAV (esr);
922
923 if (set_edr)
924 {
925 int edn = set_edr_register (current_cpu, item, 0/* EDR0-3 */);
926 SET_ESR_EDN (esr, edn);
927 SET_ESR_EDV (esr);
928 }
929 else
930 CLEAR_ESR_EDV (esr);
931
932 if (set_daec)
933 SET_ESR_DAEC (esr, item->u.daec);
934
935 SET_ESR_VALID (esr);
936 SET_ESR (reg_index, esr);
937 }
938 }
939
940 /* Check for compound interrupts.
941 Returns NULL if no interrupt is to be processed. */
942 static struct frv_interrupt *
943 check_for_compound_interrupt (
944 SIM_CPU *current_cpu, struct frv_interrupt_queue_element *item
945 )
946 {
947 struct frv_interrupt *interrupt;
948
949 /* Set the exception status registers for the original interrupt. */
950 set_exception_status_registers (current_cpu, item);
951 interrupt = & frv_interrupt_table[item->kind];
952
953 if (! interrupt->precise)
954 {
955 IADDR vpc = 0;
956 int mask = 0;
957
958 vpc = item->vpc;
959 mask = (1 << item->kind);
960
961 /* Look for more queued program interrupts which are non-deferred
962 (pending inhibit), imprecise (non-strict), different from an interrupt
963 already found, and caused by a different insn. A bit mask is used
964 to keep track of interrupts which have already been detected. */
965 while (item != frv_interrupt_state.queue)
966 {
967 enum frv_interrupt_kind kind;
968 struct frv_interrupt *next_interrupt;
969 --item;
970 kind = item->kind;
971 next_interrupt = & frv_interrupt_table[kind];
972
973 if (next_interrupt->iclass != FRV_PROGRAM_INTERRUPT)
974 break; /* no program interrupts left. */
975
976 if (item->vpc == vpc)
977 continue; /* caused by the same insn. */
978
979 vpc = item->vpc;
980 if (! next_interrupt->precise && ! next_interrupt->deferred)
981 {
982 if (! (mask & (1 << kind)))
983 {
984 /* Set the exception status registers for the additional
985 interrupt. */
986 set_exception_status_registers (current_cpu, item);
987 mask |= (1 << kind);
988 interrupt = & frv_interrupt_table[FRV_COMPOUND_EXCEPTION];
989 }
990 }
991 }
992 }
993
994 /* Return with either the original interrupt, a compound_exception,
995 or no exception. */
996 return interrupt;
997 }
998
999 /* Handle a program interrupt. */
1000 void
1001 frv_program_interrupt (
1002 SIM_CPU *current_cpu, struct frv_interrupt_queue_element *item, IADDR pc
1003 )
1004 {
1005 struct frv_interrupt *interrupt;
1006
1007 clear_exception_status_registers (current_cpu);
1008 /* If two or more non-deferred imprecise (non-strict) interrupts occur
1009 on two or more insns, then generate a compound_exception. */
1010 interrupt = check_for_compound_interrupt (current_cpu, item);
1011 if (interrupt != NULL)
1012 {
1013 frv_program_or_software_interrupt (current_cpu, interrupt, pc);
1014 frv_clear_interrupt_classes (FRV_SOFTWARE_INTERRUPT,
1015 FRV_PROGRAM_INTERRUPT);
1016 }
1017 }
1018
1019 /* Handle a software interrupt. */
1020 void
1021 frv_software_interrupt (
1022 SIM_CPU *current_cpu, struct frv_interrupt_queue_element *item, IADDR pc
1023 )
1024 {
1025 struct frv_interrupt *interrupt = & frv_interrupt_table[item->kind];
1026 frv_program_or_software_interrupt (current_cpu, interrupt, pc);
1027 }
1028
1029 /* Handle a program interrupt or a software interrupt in non-operating mode. */
1030 void
1031 frv_non_operating_interrupt (
1032 SIM_CPU *current_cpu, enum frv_interrupt_kind kind, IADDR pc
1033 )
1034 {
1035 SIM_DESC sd = CPU_STATE (current_cpu);
1036 switch (kind)
1037 {
1038 case FRV_INTERRUPT_LEVEL_1:
1039 case FRV_INTERRUPT_LEVEL_2:
1040 case FRV_INTERRUPT_LEVEL_3:
1041 case FRV_INTERRUPT_LEVEL_4:
1042 case FRV_INTERRUPT_LEVEL_5:
1043 case FRV_INTERRUPT_LEVEL_6:
1044 case FRV_INTERRUPT_LEVEL_7:
1045 case FRV_INTERRUPT_LEVEL_8:
1046 case FRV_INTERRUPT_LEVEL_9:
1047 case FRV_INTERRUPT_LEVEL_10:
1048 case FRV_INTERRUPT_LEVEL_11:
1049 case FRV_INTERRUPT_LEVEL_12:
1050 case FRV_INTERRUPT_LEVEL_13:
1051 case FRV_INTERRUPT_LEVEL_14:
1052 case FRV_INTERRUPT_LEVEL_15:
1053 sim_engine_abort (sd, current_cpu, pc,
1054 "interrupt: external %d\n", kind + 1);
1055 break;
1056 case FRV_TRAP_INSTRUCTION:
1057 break; /* handle as in operating mode. */
1058 case FRV_COMMIT_EXCEPTION:
1059 sim_engine_abort (sd, current_cpu, pc,
1060 "interrupt: commit_exception\n");
1061 break;
1062 case FRV_DIVISION_EXCEPTION:
1063 sim_engine_abort (sd, current_cpu, pc,
1064 "interrupt: division_exception\n");
1065 break;
1066 case FRV_DATA_STORE_ERROR:
1067 sim_engine_abort (sd, current_cpu, pc,
1068 "interrupt: data_store_error\n");
1069 break;
1070 case FRV_DATA_ACCESS_EXCEPTION:
1071 sim_engine_abort (sd, current_cpu, pc,
1072 "interrupt: data_access_exception\n");
1073 break;
1074 case FRV_DATA_ACCESS_MMU_MISS:
1075 sim_engine_abort (sd, current_cpu, pc,
1076 "interrupt: data_access_mmu_miss\n");
1077 break;
1078 case FRV_DATA_ACCESS_ERROR:
1079 sim_engine_abort (sd, current_cpu, pc,
1080 "interrupt: data_access_error\n");
1081 break;
1082 case FRV_MP_EXCEPTION:
1083 sim_engine_abort (sd, current_cpu, pc,
1084 "interrupt: mp_exception\n");
1085 break;
1086 case FRV_FP_EXCEPTION:
1087 sim_engine_abort (sd, current_cpu, pc,
1088 "interrupt: fp_exception\n");
1089 break;
1090 case FRV_MEM_ADDRESS_NOT_ALIGNED:
1091 sim_engine_abort (sd, current_cpu, pc,
1092 "interrupt: mem_address_not_aligned\n");
1093 break;
1094 case FRV_REGISTER_EXCEPTION:
1095 sim_engine_abort (sd, current_cpu, pc,
1096 "interrupt: register_exception\n");
1097 break;
1098 case FRV_MP_DISABLED:
1099 sim_engine_abort (sd, current_cpu, pc,
1100 "interrupt: mp_disabled\n");
1101 break;
1102 case FRV_FP_DISABLED:
1103 sim_engine_abort (sd, current_cpu, pc,
1104 "interrupt: fp_disabled\n");
1105 break;
1106 case FRV_PRIVILEGED_INSTRUCTION:
1107 sim_engine_abort (sd, current_cpu, pc,
1108 "interrupt: privileged_instruction\n");
1109 break;
1110 case FRV_ILLEGAL_INSTRUCTION:
1111 sim_engine_abort (sd, current_cpu, pc,
1112 "interrupt: illegal_instruction\n");
1113 break;
1114 case FRV_INSTRUCTION_ACCESS_EXCEPTION:
1115 sim_engine_abort (sd, current_cpu, pc,
1116 "interrupt: instruction_access_exception\n");
1117 break;
1118 case FRV_INSTRUCTION_ACCESS_MMU_MISS:
1119 sim_engine_abort (sd, current_cpu, pc,
1120 "interrupt: instruction_access_mmu_miss\n");
1121 break;
1122 case FRV_INSTRUCTION_ACCESS_ERROR:
1123 sim_engine_abort (sd, current_cpu, pc,
1124 "interrupt: insn_access_error\n");
1125 break;
1126 case FRV_COMPOUND_EXCEPTION:
1127 sim_engine_abort (sd, current_cpu, pc,
1128 "interrupt: compound_exception\n");
1129 break;
1130 case FRV_BREAK_EXCEPTION:
1131 sim_engine_abort (sd, current_cpu, pc,
1132 "interrupt: break_exception\n");
1133 break;
1134 case FRV_RESET:
1135 sim_engine_abort (sd, current_cpu, pc,
1136 "interrupt: reset\n");
1137 break;
1138 default:
1139 sim_engine_abort (sd, current_cpu, pc,
1140 "unhandled interrupt kind: %d\n", kind);
1141 break;
1142 }
1143 }
1144
1145 /* Handle a break interrupt. */
1146 void
1147 frv_break_interrupt (
1148 SIM_CPU *current_cpu, struct frv_interrupt *interrupt, IADDR current_pc
1149 )
1150 {
1151 IADDR new_pc;
1152
1153 /* BPCSR=PC
1154 BPSR.BS=PSR.S
1155 BPSR.BET=PSR.ET
1156 PSR.S=1
1157 PSR.ET=0
1158 TBR.TT=0xff
1159 PC=TBR
1160 */
1161 /* Must set PSR.S first to allow access to supervisor-only spr registers. */
1162 SET_H_BPSR_BS (GET_H_PSR_S ());
1163 SET_H_BPSR_BET (GET_H_PSR_ET ());
1164 SET_H_PSR_S (1);
1165 SET_H_PSR_ET (0);
1166 /* Must set PSR.S first to allow access to supervisor-only spr registers. */
1167 SET_H_SPR (H_SPR_BPCSR, current_pc);
1168
1169 /* Set the new PC in the TBR. */
1170 SET_H_TBR_TT (interrupt->handler_offset);
1171 new_pc = GET_H_SPR (H_SPR_TBR);
1172 SET_H_PC (new_pc);
1173
1174 CPU_DEBUG_STATE (current_cpu) = 1;
1175 }
1176
1177 /* Handle a program interrupt or a software interrupt. */
1178 void
1179 frv_program_or_software_interrupt (
1180 SIM_CPU *current_cpu, struct frv_interrupt *interrupt, IADDR current_pc
1181 )
1182 {
1183 USI new_pc;
1184 int original_psr_et;
1185
1186 /* PCSR=PC
1187 PSR.PS=PSR.S
1188 PSR.ET=0
1189 PSR.S=1
1190 if PSR.ESR==1
1191 SR0 through SR3=GR4 through GR7
1192 TBR.TT=interrupt handler offset
1193 PC=TBR
1194 */
1195 original_psr_et = GET_H_PSR_ET ();
1196
1197 SET_H_PSR_PS (GET_H_PSR_S ());
1198 SET_H_PSR_ET (0);
1199 SET_H_PSR_S (1);
1200
1201 /* Must set PSR.S first to allow access to supervisor-only spr registers. */
1202 /* The PCSR depends on the precision of the interrupt. */
1203 if (interrupt->precise)
1204 SET_H_SPR (H_SPR_PCSR, previous_vliw_pc);
1205 else
1206 SET_H_SPR (H_SPR_PCSR, current_pc);
1207
1208 /* Set the new PC in the TBR. */
1209 SET_H_TBR_TT (interrupt->handler_offset);
1210 new_pc = GET_H_SPR (H_SPR_TBR);
1211 SET_H_PC (new_pc);
1212
1213 /* If PSR.ET was not originally set, then enter the stopped state. */
1214 if (! original_psr_et)
1215 {
1216 SIM_DESC sd = CPU_STATE (current_cpu);
1217 frv_non_operating_interrupt (current_cpu, interrupt->kind, current_pc);
1218 sim_engine_halt (sd, current_cpu, NULL, new_pc, sim_stopped, SIM_SIGINT);
1219 }
1220 }
1221
1222 /* Handle an external interrupt. */
1223 void
1224 frv_external_interrupt (
1225 SIM_CPU *current_cpu, struct frv_interrupt_queue_element *item, IADDR pc
1226 )
1227 {
1228 USI new_pc;
1229 struct frv_interrupt *interrupt = & frv_interrupt_table[item->kind];
1230
1231 /* Don't process the interrupt if PSR.ET is not set or if it is masked.
1232 Interrupt 15 is processed even if it appears to be masked. */
1233 if (! GET_H_PSR_ET ()
1234 || (interrupt->kind != FRV_INTERRUPT_LEVEL_15
1235 && interrupt->kind < GET_H_PSR_PIL ()))
1236 return; /* Leave it for later. */
1237
1238 /* Remove the interrupt from the queue. */
1239 --frv_interrupt_state.queue_index;
1240
1241 /* PCSR=PC
1242 PSR.PS=PSR.S
1243 PSR.ET=0
1244 PSR.S=1
1245 if PSR.ESR==1
1246 SR0 through SR3=GR4 through GR7
1247 TBR.TT=interrupt handler offset
1248 PC=TBR
1249 */
1250 SET_H_PSR_PS (GET_H_PSR_S ());
1251 SET_H_PSR_ET (0);
1252 SET_H_PSR_S (1);
1253 /* Must set PSR.S first to allow access to supervisor-only spr registers. */
1254 SET_H_SPR (H_SPR_PCSR, GET_H_PC ());
1255
1256 /* Set the new PC in the TBR. */
1257 SET_H_TBR_TT (interrupt->handler_offset);
1258 new_pc = GET_H_SPR (H_SPR_TBR);
1259 SET_H_PC (new_pc);
1260 }
1261
1262 /* Clear interrupts which fall within the range of classes given. */
1263 void
1264 frv_clear_interrupt_classes (
1265 enum frv_interrupt_class low_class, enum frv_interrupt_class high_class
1266 )
1267 {
1268 int i;
1269 int j;
1270 int limit = frv_interrupt_state.queue_index;
1271
1272 /* Find the lowest priority interrupt to be removed. */
1273 for (i = 0; i < limit; ++i)
1274 {
1275 enum frv_interrupt_kind kind = frv_interrupt_state.queue[i].kind;
1276 struct frv_interrupt* interrupt = & frv_interrupt_table[kind];
1277 if (interrupt->iclass >= low_class)
1278 break;
1279 }
1280
1281 /* Find the highest priority interrupt to be removed. */
1282 for (j = limit - 1; j >= i; --j)
1283 {
1284 enum frv_interrupt_kind kind = frv_interrupt_state.queue[j].kind;
1285 struct frv_interrupt* interrupt = & frv_interrupt_table[kind];
1286 if (interrupt->iclass <= high_class)
1287 break;
1288 }
1289
1290 /* Shuffle the remaining high priority interrupts down into the empty space
1291 left by the deleted interrupts. */
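  /* For example, assuming the interrupt classes are ordered like the table
     above, clearing FRV_SOFTWARE_INTERRUPT through FRV_PROGRAM_INTERRUPT from
     a queue holding { external, software, program, break } leaves i at the
     software entry and j at the program entry; the break entry is then
     copied down over them and queue_index drops by two.  */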
1292 if (j >= i)
1293 {
1294 for (++j; j < limit; ++j)
1295 frv_interrupt_state.queue[i++] = frv_interrupt_state.queue[j];
1296 frv_interrupt_state.queue_index -= (j - i);
1297 }
1298 }
1299
1300 /* Save data written to memory into the interrupt state so that it can be
1301 copied to the appropriate EDR register, if necessary, in the event of an
1302 interrupt. */
1303 void
1304 frv_save_data_written_for_interrupts (
1305 SIM_CPU *current_cpu, CGEN_WRITE_QUEUE_ELEMENT *item
1306 )
1307 {
1308 /* Record the slot containing the insn doing the write in the
1309 interrupt state. */
1310 frv_interrupt_state.slot = CGEN_WRITE_QUEUE_ELEMENT_PIPE (item);
1311
1312 /* Now record any data written to memory in the interrupt state. */
1313 switch (CGEN_WRITE_QUEUE_ELEMENT_KIND (item))
1314 {
1315 case CGEN_BI_WRITE:
1316 case CGEN_QI_WRITE:
1317 case CGEN_SI_WRITE:
1318 case CGEN_SF_WRITE:
1319 case CGEN_PC_WRITE:
1320 case CGEN_FN_HI_WRITE:
1321 case CGEN_FN_SI_WRITE:
1322 case CGEN_FN_SF_WRITE:
1323 case CGEN_FN_DI_WRITE:
1324 case CGEN_FN_DF_WRITE:
1325 case CGEN_FN_XI_WRITE:
1326 case CGEN_FN_PC_WRITE:
1327 break; /* Ignore writes to registers. */
1328 case CGEN_MEM_QI_WRITE:
1329 frv_interrupt_state.data_written.length = 1;
1330 frv_interrupt_state.data_written.words[0]
1331 = item->kinds.mem_qi_write.value;
1332 break;
1333 case CGEN_MEM_HI_WRITE:
1334 frv_interrupt_state.data_written.length = 1;
1335 frv_interrupt_state.data_written.words[0]
1336 = item->kinds.mem_hi_write.value;
1337 break;
1338 case CGEN_MEM_SI_WRITE:
1339 frv_interrupt_state.data_written.length = 1;
1340 frv_interrupt_state.data_written.words[0]
1341 = item->kinds.mem_si_write.value;
1342 break;
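    /* The 64-bit and 128-bit cases below record the value most significant
       word first, so words[0] ends up in the lowest-numbered EDR chosen by
       set_edr_register.  */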
1343 case CGEN_MEM_DI_WRITE:
1344 frv_interrupt_state.data_written.length = 2;
1345 frv_interrupt_state.data_written.words[0]
1346 = item->kinds.mem_di_write.value >> 32;
1347 frv_interrupt_state.data_written.words[1]
1348 = item->kinds.mem_di_write.value;
1349 break;
1350 case CGEN_MEM_DF_WRITE:
1351 frv_interrupt_state.data_written.length = 2;
1352 frv_interrupt_state.data_written.words[0]
1353 = item->kinds.mem_df_write.value >> 32;
1354 frv_interrupt_state.data_written.words[1]
1355 = item->kinds.mem_df_write.value;
1356 break;
1357 case CGEN_MEM_XI_WRITE:
1358 frv_interrupt_state.data_written.length = 4;
1359 frv_interrupt_state.data_written.words[0]
1360 = item->kinds.mem_xi_write.value[0];
1361 frv_interrupt_state.data_written.words[1]
1362 = item->kinds.mem_xi_write.value[1];
1363 frv_interrupt_state.data_written.words[2]
1364 = item->kinds.mem_xi_write.value[2];
1365 frv_interrupt_state.data_written.words[3]
1366 = item->kinds.mem_xi_write.value[3];
1367 break;
1368 case CGEN_FN_MEM_QI_WRITE:
1369 frv_interrupt_state.data_written.length = 1;
1370 frv_interrupt_state.data_written.words[0]
1371 = item->kinds.fn_mem_qi_write.value;
1372 break;
1373 case CGEN_FN_MEM_HI_WRITE:
1374 frv_interrupt_state.data_written.length = 1;
1375 frv_interrupt_state.data_written.words[0]
1376 = item->kinds.fn_mem_hi_write.value;
1377 break;
1378 case CGEN_FN_MEM_SI_WRITE:
1379 frv_interrupt_state.data_written.length = 1;
1380 frv_interrupt_state.data_written.words[0]
1381 = item->kinds.fn_mem_si_write.value;
1382 break;
1383 case CGEN_FN_MEM_DI_WRITE:
1384 frv_interrupt_state.data_written.length = 2;
1385 frv_interrupt_state.data_written.words[0]
1386 = item->kinds.fn_mem_di_write.value >> 32;
1387 frv_interrupt_state.data_written.words[1]
1388 = item->kinds.fn_mem_di_write.value;
1389 break;
1390 case CGEN_FN_MEM_DF_WRITE:
1391 frv_interrupt_state.data_written.length = 2;
1392 frv_interrupt_state.data_written.words[0]
1393 = item->kinds.fn_mem_df_write.value >> 32;
1394 frv_interrupt_state.data_written.words[1]
1395 = item->kinds.fn_mem_df_write.value;
1396 break;
1397 case CGEN_FN_MEM_XI_WRITE:
1398 frv_interrupt_state.data_written.length = 4;
1399 frv_interrupt_state.data_written.words[0]
1400 = item->kinds.fn_mem_xi_write.value[0];
1401 frv_interrupt_state.data_written.words[1]
1402 = item->kinds.fn_mem_xi_write.value[1];
1403 frv_interrupt_state.data_written.words[2]
1404 = item->kinds.fn_mem_xi_write.value[2];
1405 frv_interrupt_state.data_written.words[3]
1406 = item->kinds.fn_mem_xi_write.value[3];
1407 break;
1408 default:
1409 {
1410 SIM_DESC sd = CPU_STATE (current_cpu);
1411 IADDR pc = CPU_PC_GET (current_cpu);
1412 sim_engine_abort (sd, current_cpu, pc,
1413 "unknown write kind during save for interrupt\n");
1414 }
1415 break;
1416 }
1417 }