# Simulator main loop for m32r2. -*- C -*-
#
# Copyright 1996, 1997, 1998, 2003, 2004 Free Software Foundation, Inc.
#
# This file is part of GDB, the GNU debugger.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2, or (at your option)
# any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License along
# with this program; if not, write to the Free Software Foundation, Inc.,
# 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.

# Syntax:
# /bin/sh mainloop.in command
#
# Command is one of:
#
# init
# support
# extract-{simple,scache,pbb}
# {full,fast}-exec-{simple,scache,pbb}
#
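# For example (the invocation normally comes from the CGEN genmloop machinery
# during the build; shown here only as an illustration):
#
#   /bin/sh mainloop.in support      # emit the support routines below
#   /bin/sh mainloop.in extract-pbb  # emit the pbb extraction code
#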
# A target need only provide a "full" version of one of simple, scache, pbb.
# If it wants to, the target can also provide a fast version of the same, or,
# if the slow (full-featured) version is `simple', the fast version can be
# one of scache/pbb.
# A target can't provide more than this.

# ??? After a few more ports are done, revisit.
# Will eventually need to machine generate a lot of this.

case "x$1" in

xsupport)

cat <<EOF

/* Emit insns to write back the results of insns executed in parallel.
   SC points to a sufficient number of scache entries for the writeback
   handlers.
   SC1/ID1 is the first insn (left slot, lower address).
   SC2/ID2 is the second insn (right slot, higher address).  */

static INLINE void
emit_par_finish (SIM_CPU *current_cpu, PCADDR pc, SCACHE *sc,
                 SCACHE *sc1, const IDESC *id1, SCACHE *sc2, const IDESC *id2)
{
  ARGBUF *abuf;

  abuf = &sc->argbuf;
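  /* Switch to the writeback variant of the first insn's descriptor and
     record where its queued results live (SC1's argbuf) so the finisher
     can commit them after both insns have executed.  */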
  id1 = id1->par_idesc;
  abuf->fields.write.abuf = &sc1->argbuf;
  @cpu@_fill_argbuf (current_cpu, abuf, id1, pc, 0);
  /* no need to set trace_p,profile_p */
#if 0 /* not currently needed for id2 since results written directly */
  abuf = &sc[1].argbuf;
  id2 = id2->par_idesc;
  abuf->fields.write.abuf = &sc2->argbuf;
  @cpu@_fill_argbuf (current_cpu, abuf, id2, pc + 2, 0);
  /* no need to set trace_p,profile_p */
#endif
}

static INLINE const IDESC *
emit_16 (SIM_CPU *current_cpu, PCADDR pc, CGEN_INSN_INT insn,
         SCACHE *sc, int fast_p, int parallel_p)
{
  ARGBUF *abuf = &sc->argbuf;
  const IDESC *id = @cpu@_decode (current_cpu, pc, insn, insn, abuf);

  if (parallel_p)
    id = id->par_idesc;
  @cpu@_fill_argbuf (current_cpu, abuf, id, pc, fast_p);
  return id;
}

static INLINE const IDESC *
emit_full16 (SIM_CPU *current_cpu, PCADDR pc, CGEN_INSN_INT insn, SCACHE *sc,
             int trace_p, int profile_p)
{
  const IDESC *id;

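  /* Three scache entries are used here: the before handler, the insn
     itself, and the after handler; callers advance sc and max_insns by 3
     to match.  */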
  @cpu@_emit_before (current_cpu, sc, pc, 1);
  id = emit_16 (current_cpu, pc, insn, sc + 1, 0, 0);
  @cpu@_emit_after (current_cpu, sc + 2, pc);
  sc[1].argbuf.trace_p = trace_p;
  sc[1].argbuf.profile_p = profile_p;
  return id;
}

static INLINE const IDESC *
emit_parallel (SIM_CPU *current_cpu, PCADDR pc, CGEN_INSN_INT insn,
               SCACHE *sc, int fast_p)
{
  const IDESC *id, *id2;

  /* Emit both insns, then emit a finisher-upper.
     We speed things up by handling the second insn serially
     [not parallelly].  Then the writeback only has to deal
     with the first insn.  */
  /* ??? Revisit to handle exceptions right.  */

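  /* Layout: sc[0] holds the first insn (executed with its results queued),
     sc[1] holds the second insn (executed serially), and sc[2] holds the
     writeback finisher emitted at the end; callers advance by 3.  */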
  /* FIXME: No need to handle this parallelly if second is nop.  */
  id = emit_16 (current_cpu, pc, insn >> 16, sc, fast_p, 1);

  /* Note that this can never be a cti.  No cti's go in the S pipeline.  */
  id2 = emit_16 (current_cpu, pc + 2, insn & 0x7fff, sc + 1, fast_p, 0);

  /* Set sc/snc insns' notion of where to skip to.  */
  if (IDESC_SKIP_P (id))
    SEM_SKIP_COMPILE (current_cpu, sc, 1);

  /* Emit code to finish executing the semantics
     (write back the results).  */
  emit_par_finish (current_cpu, pc, sc + 2, sc, id, sc + 1, id2);

  return id;
}

static INLINE const IDESC *
emit_full_parallel (SIM_CPU *current_cpu, PCADDR pc, CGEN_INSN_INT insn,
                    SCACHE *sc, int trace_p, int profile_p)
{
  const IDESC *id, *id2;

  /* Emit both insns, then emit a finisher-upper.
     We speed things up by handling the second insn serially
     [not parallelly].  Then the writeback only has to deal
     with the first insn.  */
  /* ??? Revisit to handle exceptions right.  */

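  /* Layout: sc[0]/sc[2] hold the before handlers, sc[1]/sc[3] the two insns,
     sc[4] the writeback finisher and sc[5] the after handler; callers
     advance by 6.  */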
  @cpu@_emit_before (current_cpu, sc, pc, 1);

  /* FIXME: No need to handle this parallelly if second is nop.  */
  id = emit_16 (current_cpu, pc, insn >> 16, sc + 1, 0, 1);
  sc[1].argbuf.trace_p = trace_p;
  sc[1].argbuf.profile_p = profile_p;

  @cpu@_emit_before (current_cpu, sc + 2, pc, 0);

  /* Note that this can never be a cti.  No cti's go in the S pipeline.  */
  id2 = emit_16 (current_cpu, pc + 2, insn & 0x7fff, sc + 3, 0, 0);
  sc[3].argbuf.trace_p = trace_p;
  sc[3].argbuf.profile_p = profile_p;

  /* Set sc/snc insns' notion of where to skip to.  */
  if (IDESC_SKIP_P (id))
    SEM_SKIP_COMPILE (current_cpu, sc, 4);

  /* Emit code to finish executing the semantics
     (write back the results).  */
  emit_par_finish (current_cpu, pc, sc + 4, sc + 1, id, sc + 3, id2);

  @cpu@_emit_after (current_cpu, sc + 5, pc);

  return id;
}

static INLINE const IDESC *
emit_32 (SIM_CPU *current_cpu, PCADDR pc, CGEN_INSN_INT insn,
         SCACHE *sc, int fast_p)
{
  ARGBUF *abuf = &sc->argbuf;
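  /* The decoder is given the 16 bit base opcode (the high halfword) plus the
     entire 32 bit insn word; compare emit_16 above, where both arguments are
     the same 16 bit value.  */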
  const IDESC *id = @cpu@_decode (current_cpu, pc,
                                  (USI) insn >> 16, insn, abuf);

  @cpu@_fill_argbuf (current_cpu, abuf, id, pc, fast_p);
  return id;
}

static INLINE const IDESC *
emit_full32 (SIM_CPU *current_cpu, PCADDR pc, CGEN_INSN_INT insn, SCACHE *sc,
             int trace_p, int profile_p)
{
  const IDESC *id;

  @cpu@_emit_before (current_cpu, sc, pc, 1);
  id = emit_32 (current_cpu, pc, insn, sc + 1, 0);
  @cpu@_emit_after (current_cpu, sc + 2, pc);
  sc[1].argbuf.trace_p = trace_p;
  sc[1].argbuf.profile_p = profile_p;
  return id;
}

EOF

;;

xinit)

# Nothing needed.

;;

xextract-pbb)

# Inputs: current_cpu, pc, sc, max_insns, FAST_P
# Outputs: sc, pc
# sc must be left pointing past the last created entry.
# pc must be left pointing past the last created entry.
# If the pbb is terminated by a cti insn, SET_CTI_VPC(sc) must be called
# to record the vpc of the cti insn.
# SET_INSN_COUNT(n) must be called to record number of real insns.

cat <<EOF
{
  const IDESC *idesc;
  int icount = 0;

  if ((pc & 3) != 0)
    {
      /* This occurs when single stepping and when compiling the not-taken
         part of conditional branches.  */
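      /* Here pc points at the second halfword of a 32 bit word, so just
         that halfword is fetched and decoded.  */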
      UHI insn = GETIMEMUHI (current_cpu, pc);
      int trace_p = PC_IN_TRACE_RANGE_P (current_cpu, pc);
      int profile_p = PC_IN_PROFILE_RANGE_P (current_cpu, pc);
      SCACHE *cti_sc; /* ??? tmp hack */

      /* A parallel insn isn't allowed here, but we don't mind nops.
         ??? We need to wait until the insn is executed before signalling
         the error, for situations where such signalling is wanted.  */
#if 0
      if ((insn & 0x8000) != 0
          && (insn & 0x7fff) != 0x7000) /* parallel nops are ok */
        sim_engine_invalid_insn (current_cpu, pc, 0);
#endif

      /* Only emit before/after handlers if necessary.  */
      if (FAST_P || (! trace_p && ! profile_p))
        {
          idesc = emit_16 (current_cpu, pc, insn & 0x7fff, sc, FAST_P, 0);
          cti_sc = sc;
          ++sc;
          --max_insns;
        }
      else
        {
          idesc = emit_full16 (current_cpu, pc, insn & 0x7fff, sc,
                               trace_p, profile_p);
          cti_sc = sc + 1;
          sc += 3;
          max_insns -= 3;
        }
      ++icount;
      pc += 2;
      if (IDESC_CTI_P (idesc))
        {
          SET_CTI_VPC (cti_sc);
          goto Finish;
        }
    }

  /* There are two copies of the compiler: full (!fast) and fast.
     The "full" case emits before/after handlers for each insn.
     Having two copies of this code is a tradeoff; a single copy
     seemed a bit more difficult to read (due to constantly testing
     FAST_P).  ??? On the other hand, with address ranges we'll want to
     omit before/after handlers for unwanted insns.  Having separate loops
     for FAST/!FAST avoids constantly doing the test in the loop, but
     typically FAST_P is a constant and such tests will get optimized out.  */

  if (FAST_P)
    {
      while (max_insns > 0)
        {
          USI insn = GETIMEMUSI (current_cpu, pc);
          if ((SI) insn < 0)
            {
              /* 32 bit insn */
              idesc = emit_32 (current_cpu, pc, insn, sc, 1);
              ++sc;
              --max_insns;
              ++icount;
              pc += 4;
              if (IDESC_CTI_P (idesc))
                {
                  SET_CTI_VPC (sc - 1);
                  break;
                }
            }
          else
            {
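              /* The MSB of the second halfword is the M32R parallel-execution
                 bit: when it is set the two 16 bit insns execute in parallel
                 (the `||' form), which is also why the second insn is masked
                 with 0x7fff before decoding.  */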
              if ((insn & 0x8000) != 0) /* parallel? */
                {
                  int up_count;

                  if (((insn >> 16) & 0xfff0) == 0x10f0)
                    {
                      /* FIXME: No need to handle this sequentially if system
                         calls will be able to execute after second insn in
                         parallel.  ( trap #num || insn )  */
                      /* insn */
                      idesc = emit_16 (current_cpu, pc + 2, insn & 0x7fff,
                                       sc, 1, 0);
                      /* trap */
                      emit_16 (current_cpu, pc, insn >> 16, sc + 1, 1, 0);
                      up_count = 2;
                    }
                  else
                    {
                      /* Yep.  Here's the "interesting" [sic] part.  */
                      idesc = emit_parallel (current_cpu, pc, insn, sc, 1);
                      up_count = 3;
                    }
                  sc += up_count;
                  max_insns -= up_count;
                  icount += 2;
                  pc += 4;
                  if (IDESC_CTI_P (idesc))
                    {
                      SET_CTI_VPC (sc - up_count);
                      break;
                    }
                }
              else /* 2 serial 16 bit insns */
                {
                  idesc = emit_16 (current_cpu, pc, insn >> 16, sc, 1, 0);
                  ++sc;
                  --max_insns;
                  ++icount;
                  pc += 2;
                  if (IDESC_CTI_P (idesc))
                    {
                      SET_CTI_VPC (sc - 1);
                      break;
                    }
                  /* While we're normally guaranteed that there's room to
                     extract the insn, when single stepping there isn't; the
                     pbb must stop after the first insn.  */
                  if (max_insns == 0)
                    break;
                  idesc = emit_16 (current_cpu, pc, insn & 0x7fff, sc, 1, 0);
                  ++sc;
                  --max_insns;
                  ++icount;
                  pc += 2;
                  if (IDESC_CTI_P (idesc))
                    {
                      SET_CTI_VPC (sc - 1);
                      break;
                    }
                }
            }
        }
    }
  else /* ! FAST_P */
    {
      while (max_insns > 0)
        {
          USI insn = GETIMEMUSI (current_cpu, pc);
          int trace_p = PC_IN_TRACE_RANGE_P (current_cpu, pc);
          int profile_p = PC_IN_PROFILE_RANGE_P (current_cpu, pc);
          SCACHE *cti_sc; /* ??? tmp hack */
          if ((SI) insn < 0)
            {
              /* 32 bit insn
                 Only emit before/after handlers if necessary.  */
              if (trace_p || profile_p)
                {
                  idesc = emit_full32 (current_cpu, pc, insn, sc,
                                       trace_p, profile_p);
                  cti_sc = sc + 1;
                  sc += 3;
                  max_insns -= 3;
                }
              else
                {
                  idesc = emit_32 (current_cpu, pc, insn, sc, 0);
                  cti_sc = sc;
                  ++sc;
                  --max_insns;
                }
              ++icount;
              pc += 4;
              if (IDESC_CTI_P (idesc))
                {
                  SET_CTI_VPC (cti_sc);
                  break;
                }
            }
          else
            {
              if ((insn & 0x8000) != 0) /* parallel? */
                {
                  /* Yep.  Here's the "interesting" [sic] part.
                     Only emit before/after handlers if necessary.  */
                  if (trace_p || profile_p)
                    {
                      if (((insn >> 16) & 0xfff0) == 0x10f0)
                        {
                          /* FIXME: No need to handle this sequentially if
                             system calls will be able to execute after second
                             insn in parallel.  ( trap #num || insn )  */
                          /* insn */
                          idesc = emit_full16 (current_cpu, pc + 2,
                                               insn & 0x7fff, sc, 0, 0);
                          /* trap */
                          emit_full16 (current_cpu, pc, insn >> 16, sc + 3,
                                       0, 0);
                        }
                      else
                        {
                          idesc = emit_full_parallel (current_cpu, pc, insn,
                                                      sc, trace_p, profile_p);
                        }
                      cti_sc = sc + 1;
                      sc += 6;
                      max_insns -= 6;
                    }
                  else
                    {
                      int up_count;

                      if (((insn >> 16) & 0xfff0) == 0x10f0)
                        {
                          /* FIXME: No need to handle this sequentially if
                             system calls will be able to execute after second
                             insn in parallel.  ( trap #num || insn )  */
                          /* insn */
                          idesc = emit_16 (current_cpu, pc + 2, insn & 0x7fff,
                                           sc, 0, 0);
                          /* trap */
                          emit_16 (current_cpu, pc, insn >> 16, sc + 1, 0, 0);
                          up_count = 2;
                        }
                      else
                        {
                          idesc = emit_parallel (current_cpu, pc, insn, sc, 0);
                          up_count = 3;
                        }
                      cti_sc = sc;
                      sc += up_count;
                      max_insns -= up_count;
                    }
                  icount += 2;
                  pc += 4;
                  if (IDESC_CTI_P (idesc))
                    {
                      SET_CTI_VPC (cti_sc);
                      break;
                    }
                }
              else /* 2 serial 16 bit insns */
                {
                  /* Only emit before/after handlers if necessary.  */
                  if (trace_p || profile_p)
                    {
                      idesc = emit_full16 (current_cpu, pc, insn >> 16, sc,
                                           trace_p, profile_p);
                      cti_sc = sc + 1;
                      sc += 3;
                      max_insns -= 3;
                    }
                  else
                    {
                      idesc = emit_16 (current_cpu, pc, insn >> 16, sc, 0, 0);
                      cti_sc = sc;
                      ++sc;
                      --max_insns;
                    }
                  ++icount;
                  pc += 2;
                  if (IDESC_CTI_P (idesc))
                    {
                      SET_CTI_VPC (cti_sc);
                      break;
                    }
                  /* While we're normally guaranteed that there's room to
                     extract the insn, when single stepping there isn't; the
                     pbb must stop after the first insn.  */
                  if (max_insns <= 0)
                    break;
                  /* Use the same trace/profile address for the 2nd insn.
                     This saves us having to compute it, and the insns come in
                     pairs anyway (one can never branch to the 2nd insn).  */
                  if (trace_p || profile_p)
                    {
                      idesc = emit_full16 (current_cpu, pc, insn & 0x7fff, sc,
                                           trace_p, profile_p);
                      cti_sc = sc + 1;
                      sc += 3;
                      max_insns -= 3;
                    }
                  else
                    {
                      idesc = emit_16 (current_cpu, pc, insn & 0x7fff, sc, 0, 0);
                      cti_sc = sc;
                      ++sc;
                      --max_insns;
                    }
                  ++icount;
                  pc += 2;
                  if (IDESC_CTI_P (idesc))
                    {
                      SET_CTI_VPC (cti_sc);
                      break;
                    }
                }
            }
        }
    }

 Finish:
  SET_INSN_COUNT (icount);
}
EOF

;;

xfull-exec-pbb)

# Inputs: current_cpu, vpc, FAST_P
# Outputs: vpc
# vpc is the virtual program counter.

cat <<EOF
#define DEFINE_SWITCH
#include "sem2-switch.c"
EOF

;;

*)
  echo "Invalid argument to mainloop.in: $1" >&2
  exit 1
  ;;

esac