]> git.ipfire.org Git - thirdparty/qemu.git/blob - target/rx/translate.c
6b52424d0f860888e42b04593f5d6b861f4c2097
[thirdparty/qemu.git] / target / rx / translate.c
1 /*
2 * RX translation
3 *
4 * Copyright (c) 2019 Yoshinori Sato
5 *
6 * This program is free software; you can redistribute it and/or modify it
7 * under the terms and conditions of the GNU General Public License,
8 * version 2 or later, as published by the Free Software Foundation.
9 *
10 * This program is distributed in the hope it will be useful, but WITHOUT
11 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
12 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
13 * more details.
14 *
15 * You should have received a copy of the GNU General Public License along with
16 * this program. If not, see <http://www.gnu.org/licenses/>.
17 */
18
19 #include "qemu/osdep.h"
20 #include "qemu/bswap.h"
21 #include "qemu/qemu-print.h"
22 #include "cpu.h"
23 #include "exec/exec-all.h"
24 #include "tcg/tcg-op.h"
25 #include "exec/cpu_ldst.h"
26 #include "exec/helper-proto.h"
27 #include "exec/helper-gen.h"
28 #include "exec/translator.h"
29 #include "trace-tcg.h"
30 #include "exec/log.h"
31
/* Per-translation-block disassembly state. */
typedef struct DisasContext {
    DisasContextBase base;  /* common translator core (pc_next, is_jmp, ...) */
    CPURXState *env;        /* CPU state; used to fetch code bytes at translate time */
    uint32_t pc;            /* address of the instruction currently being translated */
} DisasContext;

/*
 * A condition to be evaluated as "value cond 0".
 * 'temp' is caller-owned scratch used for composite conditions.
 */
typedef struct DisasCompare {
    TCGv value;    /* operand compared against zero */
    TCGv temp;     /* scratch temporary (also used as dummy for ALWAYS/NEVER) */
    TCGCond cond;  /* comparison to apply */
} DisasCompare;
43
/* Control register names indexed by cr number; "" marks unassigned slots. */
const char rx_crname[][6] = {
    "psw", "pc", "usp", "fpsw", "", "", "", "",
    "bpsw", "bpc", "isp", "fintv", "intb", "", "", "",
};
48
/* Target-specific values for dc->base.is_jmp. */
#define DISAS_JUMP DISAS_TARGET_0
#define DISAS_UPDATE DISAS_TARGET_1
#define DISAS_EXIT DISAS_TARGET_2

/* global register indexes */
static TCGv cpu_regs[16];
/* PSW flags; s/z/o hold result values, not booleans (see psw_cond) */
static TCGv cpu_psw_o, cpu_psw_s, cpu_psw_z, cpu_psw_c;
static TCGv cpu_psw_i, cpu_psw_pm, cpu_psw_u, cpu_psw_ipl;
/* control registers */
static TCGv cpu_usp, cpu_fpsw, cpu_bpsw, cpu_bpc, cpu_isp;
static TCGv cpu_fintv, cpu_intb, cpu_pc;
static TCGv_i64 cpu_acc;  /* DSP accumulator */

/* r0 doubles as the stack pointer (ISP or USP depending on PSW.U) */
#define cpu_sp cpu_regs[0]

#include "exec/gen-icount.h"
65
/* decoder helper */
/*
 * Fetch instruction bytes (i, n] and merge them into 'insn',
 * left-aligned big-endian within the 32-bit word.
 * Advances ctx->base.pc_next past each byte consumed.
 */
static uint32_t decode_load_bytes(DisasContext *ctx, uint32_t insn,
                                  int i, int n)
{
    while (++i <= n) {
        uint8_t b = cpu_ldub_code(ctx->env, ctx->base.pc_next++);
        insn |= b << (32 - i * 8);
    }
    return insn;
}
76
/*
 * Load an immediate operand from the instruction stream and advance
 * pc_next.  'sz' encodes the width: 1 = sign-extended byte,
 * 2 = sign-extended word, 3 = sign-extended 24 bits, 0 = full 32 bits.
 */
static uint32_t li(DisasContext *ctx, int sz)
{
    int32_t tmp, addr;
    CPURXState *env = ctx->env;
    addr = ctx->base.pc_next;

    tcg_debug_assert(sz < 4);
    switch (sz) {
    case 1:
        ctx->base.pc_next += 1;
        return cpu_ldsb_code(env, addr);
    case 2:
        ctx->base.pc_next += 2;
        return cpu_ldsw_code(env, addr);
    case 3:
        ctx->base.pc_next += 3;
        /* 24-bit: sign-extended high byte, plus the low 16 bits */
        tmp = cpu_ldsb_code(env, addr + 2) << 16;
        tmp |= cpu_lduw_code(env, addr) & 0xffff;
        return tmp;
    case 0:
        ctx->base.pc_next += 4;
        return cpu_ldl_code(env, addr);
    }
    return 0;
}
102
103 static int bdsp_s(DisasContext *ctx, int d)
104 {
105 /*
106 * 0 -> 8
107 * 1 -> 9
108 * 2 -> 10
109 * 3 -> 3
110 * :
111 * 7 -> 7
112 */
113 if (d < 3) {
114 d += 8;
115 }
116 return d;
117 }
118
119 /* Include the auto-generated decoder. */
120 #include "decode.inc.c"
121
/* Dump PC, the packed PSW and all 16 general registers (for -d cpu). */
void rx_cpu_dump_state(CPUState *cs, FILE *f, int flags)
{
    RXCPU *cpu = RXCPU(cs);
    CPURXState *env = &cpu->env;
    int i;
    uint32_t psw;

    psw = rx_cpu_pack_psw(env);
    qemu_fprintf(f, "pc=0x%08x psw=0x%08x\n",
                 env->pc, psw);
    /* four registers per output line */
    for (i = 0; i < 16; i += 4) {
        qemu_fprintf(f, "r%d=0x%08x r%d=0x%08x r%d=0x%08x r%d=0x%08x\n",
                     i, env->regs[i], i + 1, env->regs[i + 1],
                     i + 2, env->regs[i + 2], i + 3, env->regs[i + 3]);
    }
}
138
139 static bool use_goto_tb(DisasContext *dc, target_ulong dest)
140 {
141 if (unlikely(dc->base.singlestep_enabled)) {
142 return false;
143 } else {
144 return true;
145 }
146 }
147
/*
 * Emit a jump to 'dest', chaining TBs directly when possible and
 * falling back to an indirect lookup (or the debug helper when
 * single-stepping).  Ends the current TB.
 */
static void gen_goto_tb(DisasContext *dc, int n, target_ulong dest)
{
    if (use_goto_tb(dc, dest)) {
        tcg_gen_goto_tb(n);
        tcg_gen_movi_i32(cpu_pc, dest);
        tcg_gen_exit_tb(dc->base.tb, n);
    } else {
        tcg_gen_movi_i32(cpu_pc, dest);
        if (dc->base.singlestep_enabled) {
            /* give the debugger control after each instruction */
            gen_helper_debug(cpu_env);
        } else {
            tcg_gen_lookup_and_goto_ptr();
        }
    }
    dc->base.is_jmp = DISAS_NORETURN;
}
164
/* generic load wrapper: sign-extending load of 'size' from [mem] into reg */
static inline void rx_gen_ld(unsigned int size, TCGv reg, TCGv mem)
{
    tcg_gen_qemu_ld_i32(reg, mem, 0, size | MO_SIGN | MO_TE);
}

/* unsigned load wrapper: zero-extending load of 'size' from [mem] into reg */
static inline void rx_gen_ldu(unsigned int size, TCGv reg, TCGv mem)
{
    tcg_gen_qemu_ld_i32(reg, mem, 0, size | MO_TE);
}

/* generic store wrapper: store 'size' bytes of reg to [mem] */
static inline void rx_gen_st(unsigned int size, TCGv reg, TCGv mem)
{
    tcg_gen_qemu_st_i32(reg, mem, 0, size | MO_TE);
}
182
/* [ri, rb] : compute mem = rb + (ri << size) (scaled register indexing) */
static inline void rx_gen_regindex(DisasContext *ctx, TCGv mem,
                                   int size, int ri, int rb)
{
    tcg_gen_shli_i32(mem, cpu_regs[ri], size);
    tcg_gen_add_i32(mem, mem, cpu_regs[rb]);
}
190
/*
 * dsp[reg] : compute the effective address for the 'ld' addressing form.
 *   ld == 0: register direct, returns cpu_regs[reg] itself (mem unused)
 *   ld == 1: 8-bit displacement follows in the instruction stream
 *   ld == 2: 16-bit displacement follows
 * The displacement is scaled by the operand size and pc_next is advanced
 * past the consumed bytes.  Returns the TCGv holding the address.
 */
static inline TCGv rx_index_addr(DisasContext *ctx, TCGv mem,
                                 int ld, int size, int reg)
{
    uint32_t dsp;

    tcg_debug_assert(ld < 3);
    switch (ld) {
    case 0:
        return cpu_regs[reg];
    case 1:
        dsp = cpu_ldub_code(ctx->env, ctx->base.pc_next) << size;
        tcg_gen_addi_i32(mem, cpu_regs[reg], dsp);
        ctx->base.pc_next += 1;
        return mem;
    case 2:
        dsp = cpu_lduw_code(ctx->env, ctx->base.pc_next) << size;
        tcg_gen_addi_i32(mem, cpu_regs[reg], dsp);
        ctx->base.pc_next += 2;
        return mem;
    }
    return NULL;
}
214
215 static inline MemOp mi_to_mop(unsigned mi)
216 {
217 static const MemOp mop[5] = { MO_SB, MO_SW, MO_UL, MO_UW, MO_UB };
218 tcg_debug_assert(mi < 5);
219 return mop[mi];
220 }
221
/*
 * Load a source operand: either from memory (ld < 3, addressing mode
 * decoded via rx_index_addr with the width/extension given by 'mi'),
 * or directly from register rs (ld == 3).  Returns the TCGv holding
 * the value ('mem' when loaded from memory).
 */
static inline TCGv rx_load_source(DisasContext *ctx, TCGv mem,
                                  int ld, int mi, int rs)
{
    TCGv addr;
    MemOp mop;
    if (ld < 3) {
        mop = mi_to_mop(mi);
        addr = rx_index_addr(ctx, mem, ld, mop & MO_SIZE, rs);
        tcg_gen_qemu_ld_i32(mem, addr, 0, mop | MO_TE);
        return mem;
    } else {
        return cpu_regs[rs];
    }
}
237
238 /* Processor mode check */
239 static int is_privileged(DisasContext *ctx, int is_exception)
240 {
241 if (FIELD_EX32(ctx->base.tb->flags, PSW, PM)) {
242 if (is_exception) {
243 gen_helper_raise_privilege_violation(cpu_env);
244 }
245 return 0;
246 } else {
247 return 1;
248 }
249 }
250
/* generate QEMU condition */
/*
 * Translate an RX condition code (0..15) into a DisasCompare to be
 * evaluated as "value cond 0".  The flag registers hold result values,
 * not booleans: Z is set iff cpu_psw_z == 0, S and O live in bit 31 of
 * cpu_psw_s / cpu_psw_o, and cpu_psw_c is a 0/1 boolean.
 */
static void psw_cond(DisasCompare *dc, uint32_t cond)
{
    tcg_debug_assert(cond < 16);
    switch (cond) {
    case 0: /* z */
        dc->cond = TCG_COND_EQ;
        dc->value = cpu_psw_z;
        break;
    case 1: /* nz */
        dc->cond = TCG_COND_NE;
        dc->value = cpu_psw_z;
        break;
    case 2: /* c */
        dc->cond = TCG_COND_NE;
        dc->value = cpu_psw_c;
        break;
    case 3: /* nc */
        dc->cond = TCG_COND_EQ;
        dc->value = cpu_psw_c;
        break;
    case 4: /* gtu (C& ~Z) == 1 */
    case 5: /* leu (C& ~Z) == 0 */
        /* temp = (Z clear) & C, then test temp != 0 / == 0 */
        tcg_gen_setcondi_i32(TCG_COND_NE, dc->temp, cpu_psw_z, 0);
        tcg_gen_and_i32(dc->temp, dc->temp, cpu_psw_c);
        dc->cond = (cond == 4) ? TCG_COND_NE : TCG_COND_EQ;
        dc->value = dc->temp;
        break;
    case 6: /* pz (S == 0) */
        dc->cond = TCG_COND_GE;
        dc->value = cpu_psw_s;
        break;
    case 7: /* n (S == 1) */
        dc->cond = TCG_COND_LT;
        dc->value = cpu_psw_s;
        break;
    case 8: /* ge (S^O)==0 */
    case 9: /* lt (S^O)==1 */
        /* both flags live in bit 31, so test the sign of the xor */
        tcg_gen_xor_i32(dc->temp, cpu_psw_o, cpu_psw_s);
        dc->cond = (cond == 8) ? TCG_COND_GE : TCG_COND_LT;
        dc->value = dc->temp;
        break;
    case 10: /* gt ((S^O)|Z)==0 */
    case 11: /* le ((S^O)|Z)==1 */
        /* temp = Z-value with all bits cleared when S^O is set */
        tcg_gen_xor_i32(dc->temp, cpu_psw_o, cpu_psw_s);
        tcg_gen_sari_i32(dc->temp, dc->temp, 31);
        tcg_gen_andc_i32(dc->temp, cpu_psw_z, dc->temp);
        dc->cond = (cond == 10) ? TCG_COND_NE : TCG_COND_EQ;
        dc->value = dc->temp;
        break;
    case 12: /* o */
        dc->cond = TCG_COND_LT;
        dc->value = cpu_psw_o;
        break;
    case 13: /* no */
        dc->cond = TCG_COND_GE;
        dc->value = cpu_psw_o;
        break;
    case 14: /* always true */
        dc->cond = TCG_COND_ALWAYS;
        dc->value = dc->temp;
        break;
    case 15: /* always false */
        dc->cond = TCG_COND_NEVER;
        dc->value = dc->temp;
        break;
    }
}
319
/*
 * Read control register 'cr' into ret.  PC reads return the caller's
 * 'pc'.  USP/ISP reads must account for PSW.U: the active stack lives
 * in r0 (cpu_sp), so the shadow register is only current when inactive.
 */
static void move_from_cr(TCGv ret, int cr, uint32_t pc)
{
    TCGv z = tcg_const_i32(0);
    switch (cr) {
    case 0: /* PSW */
        gen_helper_pack_psw(ret, cpu_env);
        break;
    case 1: /* PC */
        tcg_gen_movi_i32(ret, pc);
        break;
    case 2: /* USP */
        /* if PSW.U is set, USP is live in cpu_sp */
        tcg_gen_movcond_i32(TCG_COND_NE, ret,
                            cpu_psw_u, z, cpu_sp, cpu_usp);
        break;
    case 3: /* FPSW */
        tcg_gen_mov_i32(ret, cpu_fpsw);
        break;
    case 8: /* BPSW */
        tcg_gen_mov_i32(ret, cpu_bpsw);
        break;
    case 9: /* BPC */
        tcg_gen_mov_i32(ret, cpu_bpc);
        break;
    case 10: /* ISP */
        /* if PSW.U is clear, ISP is live in cpu_sp */
        tcg_gen_movcond_i32(TCG_COND_EQ, ret,
                            cpu_psw_u, z, cpu_sp, cpu_isp);
        break;
    case 11: /* FINTV */
        tcg_gen_mov_i32(ret, cpu_fintv);
        break;
    case 12: /* INTB */
        tcg_gen_mov_i32(ret, cpu_intb);
        break;
    default:
        qemu_log_mask(LOG_GUEST_ERROR, "Unimplement control register %d", cr);
        /* Unimplement registers return 0 */
        tcg_gen_movi_i32(ret, 0);
        break;
    }
    tcg_temp_free(z);
}
361
/*
 * Write val to control register 'cr'.  Registers cr >= 8 require
 * supervisor mode; user-mode writes are logged and dropped (no
 * exception is raised here).  USP/ISP writes propagate into r0
 * (cpu_sp) when that stack is the active one per PSW.U.
 */
static void move_to_cr(DisasContext *ctx, TCGv val, int cr)
{
    TCGv z;
    if (cr >= 8 && !is_privileged(ctx, 0)) {
        /* Some control registers can only be written in privileged mode. */
        qemu_log_mask(LOG_GUEST_ERROR,
                      "disallow control register write %s", rx_crname[cr]);
        return;
    }
    z = tcg_const_i32(0);
    switch (cr) {
    case 0: /* PSW */
        gen_helper_set_psw(cpu_env, val);
        break;
    /* case 1: to PC not supported */
    case 2: /* USP */
        tcg_gen_mov_i32(cpu_usp, val);
        /* if PSW.U is set, USP is the active stack: mirror into r0 */
        tcg_gen_movcond_i32(TCG_COND_NE, cpu_sp,
                            cpu_psw_u, z,  cpu_usp, cpu_sp);
        break;
    case 3: /* FPSW */
        gen_helper_set_fpsw(cpu_env, val);
        break;
    case 8: /* BPSW */
        tcg_gen_mov_i32(cpu_bpsw, val);
        break;
    case 9: /* BPC */
        tcg_gen_mov_i32(cpu_bpc, val);
        break;
    case 10: /* ISP */
        tcg_gen_mov_i32(cpu_isp, val);
        /* if PSW.U is 0, copy isp to r0 */
        tcg_gen_movcond_i32(TCG_COND_EQ, cpu_sp,
                            cpu_psw_u, z, cpu_isp, cpu_sp);
        break;
    case 11: /* FINTV */
        tcg_gen_mov_i32(cpu_fintv, val);
        break;
    case 12: /* INTB */
        tcg_gen_mov_i32(cpu_intb, val);
        break;
    default:
        qemu_log_mask(LOG_GUEST_ERROR,
                      "Unimplement control register %d", cr);
        break;
    }
    tcg_temp_free(z);
}
410
/* Push val: pre-decrement SP by 4, then 32-bit store. */
static void push(TCGv val)
{
    tcg_gen_subi_i32(cpu_sp, cpu_sp, 4);
    rx_gen_st(MO_32, val, cpu_sp);
}

/* Pop 32 bits into ret, then post-increment SP by 4. */
static void pop(TCGv ret)
{
    rx_gen_ld(MO_32, ret, cpu_sp);
    tcg_gen_addi_i32(cpu_sp, cpu_sp, 4);
}
422
/* mov.<bwl> rs,dsp5[rd] : store rs at rd + (dsp5 scaled by size) */
static bool trans_MOV_rm(DisasContext *ctx, arg_MOV_rm *a)
{
    TCGv mem;
    mem = tcg_temp_new();
    tcg_gen_addi_i32(mem, cpu_regs[a->rd], a->dsp << a->sz);
    rx_gen_st(a->sz, cpu_regs[a->rs], mem);
    tcg_temp_free(mem);
    return true;
}

/* mov.<bwl> dsp5[rs],rd : sign-extending load from rs + scaled dsp5 */
static bool trans_MOV_mr(DisasContext *ctx, arg_MOV_mr *a)
{
    TCGv mem;
    mem = tcg_temp_new();
    tcg_gen_addi_i32(mem, cpu_regs[a->rs], a->dsp << a->sz);
    rx_gen_ld(a->sz, cpu_regs[a->rd], mem);
    tcg_temp_free(mem);
    return true;
}
444
/* mov.l #uimm4,rd */
/* mov.l #uimm8,rd */
/* mov.l #imm,rd : load an immediate into rd */
static bool trans_MOV_ir(DisasContext *ctx, arg_MOV_ir *a)
{
    tcg_gen_movi_i32(cpu_regs[a->rd], a->imm);
    return true;
}

/* mov.<bwl> #uimm8,dsp[rd] */
/* mov.<bwl> #imm, dsp[rd] : store an immediate at rd + scaled dsp */
static bool trans_MOV_im(DisasContext *ctx, arg_MOV_im *a)
{
    TCGv imm, mem;
    imm = tcg_const_i32(a->imm);
    mem = tcg_temp_new();
    tcg_gen_addi_i32(mem, cpu_regs[a->rd], a->dsp << a->sz);
    rx_gen_st(a->sz, imm, mem);
    tcg_temp_free(imm);
    tcg_temp_free(mem);
    return true;
}
467
/* mov.<bwl> [ri,rb],rd : sign-extending load from rb + (ri << size) */
static bool trans_MOV_ar(DisasContext *ctx, arg_MOV_ar *a)
{
    TCGv mem;
    mem = tcg_temp_new();
    rx_gen_regindex(ctx, mem, a->sz, a->ri, a->rb);
    rx_gen_ld(a->sz, cpu_regs[a->rd], mem);
    tcg_temp_free(mem);
    return true;
}

/* mov.<bwl> rd,[ri,rb] : store rs at rb + (ri << size) */
static bool trans_MOV_ra(DisasContext *ctx, arg_MOV_ra *a)
{
    TCGv mem;
    mem = tcg_temp_new();
    rx_gen_regindex(ctx, mem, a->sz, a->ri, a->rb);
    rx_gen_st(a->sz, cpu_regs[a->rs], mem);
    tcg_temp_free(mem);
    return true;
}
489
/* mov.<bwl> dsp[rs],dsp[rd] */
/* mov.<bwl> rs,dsp[rd] */
/* mov.<bwl> dsp[rs],rd */
/* mov.<bwl> rs,rd */
/*
 * General mov: lds/ldd select the addressing form of source and
 * destination (3 = register direct, otherwise a dsp[reg] mode).
 */
static bool trans_MOV_mm(DisasContext *ctx, arg_MOV_mm *a)
{
    /* register-to-register copy sign-extends per operand size */
    static void (* const mov[])(TCGv ret, TCGv arg) = {
        tcg_gen_ext8s_i32, tcg_gen_ext16s_i32, tcg_gen_mov_i32,
    };
    TCGv tmp, mem, addr;
    if (a->lds == 3 && a->ldd == 3) {
        /* mov.<bwl> rs,rd */
        mov[a->sz](cpu_regs[a->rd], cpu_regs[a->rs]);
        return true;
    }

    mem = tcg_temp_new();
    if (a->lds == 3) {
        /* mov.<bwl> rs,dsp[rd] */
        /*
         * NOTE(review): the address uses a->rs and the stored value is
         * regs[a->rd] -- presumably the decode swaps the field roles in
         * this form; verify against the decode tables.
         */
        addr = rx_index_addr(ctx, mem, a->ldd, a->sz, a->rs);
        rx_gen_st(a->sz, cpu_regs[a->rd], addr);
    } else if (a->ldd == 3) {
        /* mov.<bwl> dsp[rs],rd */
        addr = rx_index_addr(ctx, mem, a->lds, a->sz, a->rs);
        rx_gen_ld(a->sz, cpu_regs[a->rd], addr);
    } else {
        /* mov.<bwl> dsp[rs],dsp[rd] : memory-to-memory via a temp */
        tmp = tcg_temp_new();
        addr = rx_index_addr(ctx, mem, a->lds, a->sz, a->rs);
        rx_gen_ld(a->sz, tmp, addr);
        addr = rx_index_addr(ctx, mem, a->ldd, a->sz, a->rd);
        rx_gen_st(a->sz, tmp, addr);
        tcg_temp_free(tmp);
    }
    tcg_temp_free(mem);
    return true;
}
527
/* mov.<bwl> rs,[rd+] */
/* mov.<bwl> rs,[-rd] */
/* Store with post-increment (ad == 0) or pre-decrement (ad == 1) of rd.
 * rs is copied first so the case rs == rd stores the original value. */
static bool trans_MOV_rp(DisasContext *ctx, arg_MOV_rp *a)
{
    TCGv val;
    val = tcg_temp_new();
    tcg_gen_mov_i32(val, cpu_regs[a->rs]);
    if (a->ad == 1) {
        tcg_gen_subi_i32(cpu_regs[a->rd], cpu_regs[a->rd], 1 << a->sz);
    }
    rx_gen_st(a->sz, val, cpu_regs[a->rd]);
    if (a->ad == 0) {
        tcg_gen_addi_i32(cpu_regs[a->rd], cpu_regs[a->rd], 1 << a->sz);
    }
    tcg_temp_free(val);
    return true;
}

/* mov.<bwl> [rd+],rs */
/* mov.<bwl> [-rd],rs */
/* Load with post-increment / pre-decrement of rd; the value is moved
 * into rs only after the pointer update so rs == rd works. */
static bool trans_MOV_pr(DisasContext *ctx, arg_MOV_pr *a)
{
    TCGv val;
    val = tcg_temp_new();
    if (a->ad == 1) {
        tcg_gen_subi_i32(cpu_regs[a->rd], cpu_regs[a->rd], 1 << a->sz);
    }
    rx_gen_ld(a->sz, val, cpu_regs[a->rd]);
    if (a->ad == 0) {
        tcg_gen_addi_i32(cpu_regs[a->rd], cpu_regs[a->rd], 1 << a->sz);
    }
    tcg_gen_mov_i32(cpu_regs[a->rs], val);
    tcg_temp_free(val);
    return true;
}
563
/* movu.<bw> dsp5[rs],rd */
/* movu.<bw> dsp[rs],rd : zero-extending load from rs + scaled dsp */
static bool trans_MOVU_mr(DisasContext *ctx, arg_MOVU_mr *a)
{
    TCGv mem;
    mem = tcg_temp_new();
    tcg_gen_addi_i32(mem, cpu_regs[a->rs], a->dsp << a->sz);
    rx_gen_ldu(a->sz, cpu_regs[a->rd], mem);
    tcg_temp_free(mem);
    return true;
}

/* movu.<bw> rs,rd : zero-extend rs (byte or word) into rd */
static bool trans_MOVU_rr(DisasContext *ctx, arg_MOVU_rr *a)
{
    static void (* const ext[])(TCGv ret, TCGv arg) = {
        tcg_gen_ext8u_i32, tcg_gen_ext16u_i32,
    };
    ext[a->sz](cpu_regs[a->rd], cpu_regs[a->rs]);
    return true;
}
585
/* movu.<bw> [ri,rb],rd : zero-extending load from rb + (ri << size) */
static bool trans_MOVU_ar(DisasContext *ctx, arg_MOVU_ar *a)
{
    TCGv mem;
    mem = tcg_temp_new();
    rx_gen_regindex(ctx, mem, a->sz, a->ri, a->rb);
    rx_gen_ldu(a->sz, cpu_regs[a->rd], mem);
    tcg_temp_free(mem);
    return true;
}

/* movu.<bw> [rd+],rs */
/* movu.<bw> [-rd],rs */
/* Zero-extending load with post-increment / pre-decrement of rd. */
static bool trans_MOVU_pr(DisasContext *ctx, arg_MOVU_pr *a)
{
    TCGv val;
    val = tcg_temp_new();
    if (a->ad == 1) {
        tcg_gen_subi_i32(cpu_regs[a->rd], cpu_regs[a->rd], 1 << a->sz);
    }
    rx_gen_ldu(a->sz, val, cpu_regs[a->rd]);
    if (a->ad == 0) {
        tcg_gen_addi_i32(cpu_regs[a->rd], cpu_regs[a->rd], 1 << a->sz);
    }
    tcg_gen_mov_i32(cpu_regs[a->rs], val);
    tcg_temp_free(val);
    return true;
}
614
615
616 /* pop rd */
617 static bool trans_POP(DisasContext *ctx, arg_POP *a)
618 {
619 /* mov.l [r0+], rd */
620 arg_MOV_rp mov_a;
621 mov_a.rd = 0;
622 mov_a.rs = a->rd;
623 mov_a.ad = 0;
624 mov_a.sz = MO_32;
625 trans_MOV_pr(ctx, &mov_a);
626 return true;
627 }
628
/* popc cr : pop a word from the stack into control register cr */
static bool trans_POPC(DisasContext *ctx, arg_POPC *a)
{
    TCGv val;
    val = tcg_temp_new();
    pop(val);
    move_to_cr(ctx, val, a->cr);
    if (a->cr == 0 && is_privileged(ctx, 0)) {
        /* PSW.I may be updated here. exit TB. */
        ctx->base.is_jmp = DISAS_UPDATE;
    }
    tcg_temp_free(val);
    return true;
}
643
/* popm rd-rd2 : pop registers rd..rd2 in ascending order */
static bool trans_POPM(DisasContext *ctx, arg_POPM *a)
{
    int r;
    /* an invalid range is only logged; the pops below still proceed */
    if (a->rd == 0 || a->rd >= a->rd2) {
        qemu_log_mask(LOG_GUEST_ERROR,
                      "Invalid register ranges r%d-r%d", a->rd, a->rd2);
    }
    r = a->rd;
    while (r <= a->rd2 && r < 16) {
        pop(cpu_regs[r++]);
    }
    return true;
}
658
659
/* push.<bwl> rs : SP always moves by 4, but only 'sz' bytes are stored */
static bool trans_PUSH_r(DisasContext *ctx, arg_PUSH_r *a)
{
    TCGv val;
    val = tcg_temp_new();
    tcg_gen_mov_i32(val, cpu_regs[a->rs]);
    tcg_gen_subi_i32(cpu_sp, cpu_sp, 4);
    rx_gen_st(a->sz, val, cpu_sp);
    tcg_temp_free(val);
    return true;
}

/* push.<bwl> dsp[rs] : load from memory, then push (SP moves by 4) */
static bool trans_PUSH_m(DisasContext *ctx, arg_PUSH_m *a)
{
    TCGv mem, val, addr;
    mem = tcg_temp_new();
    val = tcg_temp_new();
    addr = rx_index_addr(ctx, mem, a->ld, a->sz, a->rs);
    rx_gen_ld(a->sz, val, addr);
    tcg_gen_subi_i32(cpu_sp, cpu_sp, 4);
    rx_gen_st(a->sz, val, cpu_sp);
    tcg_temp_free(mem);
    tcg_temp_free(val);
    return true;
}
686
/* pushc rx : push the value of control register cr */
static bool trans_PUSHC(DisasContext *ctx, arg_PUSHC *a)
{
    TCGv val;
    val = tcg_temp_new();
    move_from_cr(val, a->cr, ctx->pc);
    push(val);
    tcg_temp_free(val);
    return true;
}

/* pushm rs-rs2 : push registers rs2..rs in descending order */
static bool trans_PUSHM(DisasContext *ctx, arg_PUSHM *a)
{
    int r;

    /* an invalid range is only logged; the pushes below still proceed */
    if (a->rs == 0 || a->rs >= a->rs2) {
        qemu_log_mask(LOG_GUEST_ERROR,
                      "Invalid register ranges r%d-r%d", a->rs, a->rs2);
    }
    r = a->rs2;
    while (r >= a->rs && r >= 0) {
        push(cpu_regs[r--]);
    }
    return true;
}
713
714 /* xchg rs,rd */
715 static bool trans_XCHG_rr(DisasContext *ctx, arg_XCHG_rr *a)
716 {
717 TCGv tmp;
718 tmp = tcg_temp_new();
719 tcg_gen_mov_i32(tmp, cpu_regs[a->rs]);
720 tcg_gen_mov_i32(cpu_regs[a->rs], cpu_regs[a->rd]);
721 tcg_gen_mov_i32(cpu_regs[a->rd], tmp);
722 tcg_temp_free(tmp);
723 return true;
724 }
725
/* xchg dsp[rs].<mi>,rd : atomic exchange between rd and memory */
static bool trans_XCHG_mr(DisasContext *ctx, arg_XCHG_mr *a)
{
    TCGv mem, addr;
    mem = tcg_temp_new();
    switch (a->mi) {
    case 0: /* dsp[rs].b */
    case 1: /* dsp[rs].w */
    case 2: /* dsp[rs].l */
        addr = rx_index_addr(ctx, mem, a->ld, a->mi, a->rs);
        break;
    case 3: /* dsp[rs].uw */
    case 4: /* dsp[rs].ub */
        /* unsigned forms: 4 - mi yields the access size (uw=1, ub=0) */
        addr = rx_index_addr(ctx, mem, a->ld, 4 - a->mi, a->rs);
        break;
    default:
        g_assert_not_reached();
    }
    tcg_gen_atomic_xchg_i32(cpu_regs[a->rd], addr, cpu_regs[a->rd],
                            0, mi_to_mop(a->mi));
    tcg_temp_free(mem);
    return true;
}
749
750 static inline void stcond(TCGCond cond, int rd, int imm)
751 {
752 TCGv z;
753 TCGv _imm;
754 z = tcg_const_i32(0);
755 _imm = tcg_const_i32(imm);
756 tcg_gen_movcond_i32(cond, cpu_regs[rd], cpu_psw_z, z,
757 _imm, cpu_regs[rd]);
758 tcg_temp_free(z);
759 tcg_temp_free(_imm);
760 }
761
/* stz #imm,rd : rd = imm when the Z flag is set */
static bool trans_STZ(DisasContext *ctx, arg_STZ *a)
{
    stcond(TCG_COND_EQ, a->rd, a->imm);
    return true;
}

/* stnz #imm,rd : rd = imm when the Z flag is clear */
static bool trans_STNZ(DisasContext *ctx, arg_STNZ *a)
{
    stcond(TCG_COND_NE, a->rd, a->imm);
    return true;
}
775
776 /* sccnd.<bwl> rd */
777 /* sccnd.<bwl> dsp:[rd] */
778 static bool trans_SCCnd(DisasContext *ctx, arg_SCCnd *a)
779 {
780 DisasCompare dc;
781 TCGv val, mem, addr;
782 dc.temp = tcg_temp_new();
783 psw_cond(&dc, a->cd);
784 if (a->ld < 3) {
785 val = tcg_temp_new();
786 mem = tcg_temp_new();
787 tcg_gen_setcondi_i32(dc.cond, val, dc.value, 0);
788 addr = rx_index_addr(ctx, mem, a->sz, a->ld, a->rd);
789 rx_gen_st(a->sz, val, addr);
790 tcg_temp_free(val);
791 tcg_temp_free(mem);
792 } else {
793 tcg_gen_setcondi_i32(dc.cond, cpu_regs[a->rd], dc.value, 0);
794 }
795 tcg_temp_free(dc.temp);
796 return true;
797 }
798
/* rtsd #imm : deallocate imm*4 bytes of frame, then return */
static bool trans_RTSD_i(DisasContext *ctx, arg_RTSD_i *a)
{
    tcg_gen_addi_i32(cpu_sp, cpu_sp, a->imm << 2);
    pop(cpu_pc);
    ctx->base.is_jmp = DISAS_JUMP;
    return true;
}

/* rtsd #imm, rd-rd2 : deallocate frame, restore rd..rd2, then return */
static bool trans_RTSD_irr(DisasContext *ctx, arg_RTSD_irr *a)
{
    int dst;
    int adj;

    /* skip the part of the frame not occupied by the saved registers */
    if (a->rd2 >= a->rd) {
        adj = a->imm - (a->rd2 - a->rd + 1);
    } else {
        adj = a->imm - (15 - a->rd + 1);
    }

    tcg_gen_addi_i32(cpu_sp, cpu_sp, adj << 2);
    dst = a->rd;
    while (dst <= a->rd2 && dst < 16) {
        pop(cpu_regs[dst++]);
    }
    pop(cpu_pc);
    ctx->base.is_jmp = DISAS_JUMP;
    return true;
}
829
/* ALU emitter signatures: unary and binary i32 operations */
typedef void (*op2fn)(TCGv ret, TCGv arg1);
typedef void (*op3fn)(TCGv ret, TCGv arg1, TCGv arg2);

/* dst = opr(src) */
static inline void rx_gen_op_rr(op2fn opr, int dst, int src)
{
    opr(cpu_regs[dst], cpu_regs[src]);
}

/* dst = opr(src, src2) */
static inline void rx_gen_op_rrr(op3fn opr, int dst, int src, int src2)
{
    opr(cpu_regs[dst], cpu_regs[src], cpu_regs[src2]);
}

/* dst = opr(src, #src2) */
static inline void rx_gen_op_irr(op3fn opr, int dst, int src, uint32_t src2)
{
    TCGv imm = tcg_const_i32(src2);
    opr(cpu_regs[dst], cpu_regs[src], imm);
    tcg_temp_free(imm);
}

/* dst = opr(dst, <memory or register source per ld/mi>) */
static inline void rx_gen_op_mr(op3fn opr, DisasContext *ctx,
                                int dst, int src, int ld, int mi)
{
    TCGv val, mem;
    mem = tcg_temp_new();
    val = rx_load_source(ctx, mem, ld, mi, src);
    opr(cpu_regs[dst], cpu_regs[dst], val);
    tcg_temp_free(mem);
}
859
/* ret = arg1 & arg2; S and Z track the result value */
static void rx_and(TCGv ret, TCGv arg1, TCGv arg2)
{
    tcg_gen_and_i32(cpu_psw_s, arg1, arg2);
    tcg_gen_mov_i32(cpu_psw_z, cpu_psw_s);
    tcg_gen_mov_i32(ret, cpu_psw_s);
}

/* and #uimm:4, rd */
/* and #imm, rd */
static bool trans_AND_ir(DisasContext *ctx, arg_AND_ir *a)
{
    rx_gen_op_irr(rx_and, a->rd, a->rd, a->imm);
    return true;
}

/* and dsp[rs], rd */
/* and rs,rd */
static bool trans_AND_mr(DisasContext *ctx, arg_AND_mr *a)
{
    rx_gen_op_mr(rx_and, ctx, a->rd, a->rs, a->ld, a->mi);
    return true;
}

/* and rs,rs2,rd */
static bool trans_AND_rrr(DisasContext *ctx, arg_AND_rrr *a)
{
    rx_gen_op_rrr(rx_and, a->rd, a->rs, a->rs2);
    return true;
}
889
/* ret = arg1 | arg2; S and Z track the result value */
static void rx_or(TCGv ret, TCGv arg1, TCGv arg2)
{
    tcg_gen_or_i32(cpu_psw_s, arg1, arg2);
    tcg_gen_mov_i32(cpu_psw_z, cpu_psw_s);
    tcg_gen_mov_i32(ret, cpu_psw_s);
}

/* or #uimm:4, rd */
/* or #imm, rd */
static bool trans_OR_ir(DisasContext *ctx, arg_OR_ir *a)
{
    rx_gen_op_irr(rx_or, a->rd, a->rd, a->imm);
    return true;
}

/* or dsp[rs], rd */
/* or rs,rd */
static bool trans_OR_mr(DisasContext *ctx, arg_OR_mr *a)
{
    rx_gen_op_mr(rx_or, ctx, a->rd, a->rs, a->ld, a->mi);
    return true;
}

/* or rs,rs2,rd */
static bool trans_OR_rrr(DisasContext *ctx, arg_OR_rrr *a)
{
    rx_gen_op_rrr(rx_or, a->rd, a->rs, a->rs2);
    return true;
}
919
/* ret = arg1 ^ arg2; S and Z track the result value */
static void rx_xor(TCGv ret, TCGv arg1, TCGv arg2)
{
    tcg_gen_xor_i32(cpu_psw_s, arg1, arg2);
    tcg_gen_mov_i32(cpu_psw_z, cpu_psw_s);
    tcg_gen_mov_i32(ret, cpu_psw_s);
}

/* xor #imm, rd */
static bool trans_XOR_ir(DisasContext *ctx, arg_XOR_ir *a)
{
    rx_gen_op_irr(rx_xor, a->rd, a->rd, a->imm);
    return true;
}

/* xor dsp[rs], rd */
/* xor rs,rd */
static bool trans_XOR_mr(DisasContext *ctx, arg_XOR_mr *a)
{
    rx_gen_op_mr(rx_xor, ctx, a->rd, a->rs, a->ld, a->mi);
    return true;
}
941
/* tst: set S and Z from arg1 & arg2 without writing a destination */
static void rx_tst(TCGv ret, TCGv arg1, TCGv arg2)
{
    tcg_gen_and_i32(cpu_psw_s, arg1, arg2);
    tcg_gen_mov_i32(cpu_psw_z, cpu_psw_s);
}

/* tst #imm, rd */
static bool trans_TST_ir(DisasContext *ctx, arg_TST_ir *a)
{
    rx_gen_op_irr(rx_tst, a->rd, a->rd, a->imm);
    return true;
}

/* tst dsp[rs], rd */
/* tst rs, rd */
static bool trans_TST_mr(DisasContext *ctx, arg_TST_mr *a)
{
    rx_gen_op_mr(rx_tst, ctx, a->rd, a->rs, a->ld, a->mi);
    return true;
}
962
/* ret = ~arg1; S and Z track the result value */
static void rx_not(TCGv ret, TCGv arg1)
{
    tcg_gen_not_i32(ret, arg1);
    tcg_gen_mov_i32(cpu_psw_z, ret);
    tcg_gen_mov_i32(cpu_psw_s, ret);
}

/* not rd */
/* not rs, rd */
static bool trans_NOT_rr(DisasContext *ctx, arg_NOT_rr *a)
{
    rx_gen_op_rr(rx_not, a->rd, a->rs);
    return true;
}

/*
 * ret = -arg1.  O is set only for the INT_MIN input (the one value
 * whose negation overflows); C is set when the result is zero
 * (i.e. no borrow occurred in 0 - arg1).
 */
static void rx_neg(TCGv ret, TCGv arg1)
{
    tcg_gen_setcondi_i32(TCG_COND_EQ, cpu_psw_o, arg1, 0x80000000);
    tcg_gen_neg_i32(ret, arg1);
    tcg_gen_setcondi_i32(TCG_COND_EQ, cpu_psw_c, ret, 0);
    tcg_gen_mov_i32(cpu_psw_z, ret);
    tcg_gen_mov_i32(cpu_psw_s, ret);
}


/* neg rd */
/* neg rs, rd */
static bool trans_NEG_rr(DisasContext *ctx, arg_NEG_rr *a)
{
    rx_gen_op_rr(rx_neg, a->rd, a->rs);
    return true;
}
995
/* ret = arg1 + arg2 + psw_c */
/*
 * Two add2 steps accumulate both the sum and the carry-out in C.
 * O is signed overflow: result sign differs from arg1 while arg1
 * and arg2 have the same sign.  S and Z track the result value.
 */
static void rx_adc(TCGv ret, TCGv arg1, TCGv arg2)
{
    TCGv z;
    z = tcg_const_i32(0);
    tcg_gen_add2_i32(cpu_psw_s, cpu_psw_c, arg1, z, cpu_psw_c, z);
    tcg_gen_add2_i32(cpu_psw_s, cpu_psw_c, cpu_psw_s, cpu_psw_c, arg2, z);
    tcg_gen_mov_i32(cpu_psw_z, cpu_psw_s);
    tcg_gen_xor_i32(cpu_psw_o, cpu_psw_s, arg1);
    tcg_gen_xor_i32(z, arg1, arg2);
    tcg_gen_andc_i32(cpu_psw_o, cpu_psw_o, z);
    tcg_gen_mov_i32(ret, cpu_psw_s);
    tcg_temp_free(z);
}

/* adc #imm, rd */
static bool trans_ADC_ir(DisasContext *ctx, arg_ADC_ir *a)
{
    rx_gen_op_irr(rx_adc, a->rd, a->rd, a->imm);
    return true;
}

/* adc rs, rd */
static bool trans_ADC_rr(DisasContext *ctx, arg_ADC_rr *a)
{
    rx_gen_op_rrr(rx_adc, a->rd, a->rd, a->rs);
    return true;
}

/* adc dsp[rs], rd */
static bool trans_ADC_mr(DisasContext *ctx, arg_ADC_mr *a)
{
    /* mi only 2 (long memory operand is the only legal form) */
    if (a->mi != 2) {
        return false;
    }
    rx_gen_op_mr(rx_adc, ctx, a->rd, a->rs, a->ld, a->mi);
    return true;
}
1035
/* ret = arg1 + arg2 */
/*
 * add2 produces the sum and the carry-out in C.  O is signed overflow
 * (same scheme as rx_adc); S and Z track the result value.
 */
static void rx_add(TCGv ret, TCGv arg1, TCGv arg2)
{
    TCGv z;
    z = tcg_const_i32(0);
    tcg_gen_add2_i32(cpu_psw_s, cpu_psw_c, arg1, z, arg2, z);
    tcg_gen_mov_i32(cpu_psw_z, cpu_psw_s);
    tcg_gen_xor_i32(cpu_psw_o, cpu_psw_s, arg1);
    tcg_gen_xor_i32(z, arg1, arg2);
    tcg_gen_andc_i32(cpu_psw_o, cpu_psw_o, z);
    tcg_gen_mov_i32(ret, cpu_psw_s);
    tcg_temp_free(z);
}

/* add #uimm4, rd */
/* add #imm, rs, rd */
static bool trans_ADD_irr(DisasContext *ctx, arg_ADD_irr *a)
{
    rx_gen_op_irr(rx_add, a->rd, a->rs2, a->imm);
    return true;
}

/* add rs, rd */
/* add dsp[rs], rd */
static bool trans_ADD_mr(DisasContext *ctx, arg_ADD_mr *a)
{
    rx_gen_op_mr(rx_add, ctx, a->rd, a->rs, a->ld, a->mi);
    return true;
}

/* add rs, rs2, rd */
static bool trans_ADD_rrr(DisasContext *ctx, arg_ADD_rrr *a)
{
    rx_gen_op_rrr(rx_add, a->rd, a->rs, a->rs2);
    return true;
}
1072
/* ret = arg1 - arg2 */
/*
 * C is set when no borrow occurs (arg1 >= arg2 unsigned); O is signed
 * overflow.  S and Z track the result.  ret may be NULL (CMP form).
 */
static void rx_sub(TCGv ret, TCGv arg1, TCGv arg2)
{
    TCGv temp;
    tcg_gen_sub_i32(cpu_psw_s, arg1, arg2);
    tcg_gen_mov_i32(cpu_psw_z, cpu_psw_s);
    tcg_gen_setcond_i32(TCG_COND_GEU, cpu_psw_c, arg1, arg2);
    tcg_gen_xor_i32(cpu_psw_o, cpu_psw_s, arg1);
    temp = tcg_temp_new_i32();
    tcg_gen_xor_i32(temp, arg1, arg2);
    tcg_gen_and_i32(cpu_psw_o, cpu_psw_o, temp);
    tcg_temp_free_i32(temp);
    /* CMP does not require the result to be returned */
    if (ret) {
        tcg_gen_mov_i32(ret, cpu_psw_s);
    }
}
/* cmp: subtraction for flags only; the dummy result is discarded */
static void rx_cmp(TCGv dummy, TCGv arg1, TCGv arg2)
{
    rx_sub(NULL, arg1, arg2);
}
/* ret = arg1 - arg2 - !psw_c */
/* -> ret = arg1 + ~arg2 + psw_c */
static void rx_sbb(TCGv ret, TCGv arg1, TCGv arg2)
{
    TCGv temp;
    temp = tcg_temp_new();
    tcg_gen_not_i32(temp, arg2);
    rx_adc(ret, arg1, temp);
    tcg_temp_free(temp);
}
1104
/* cmp #imm4, rs2 */
/* cmp #imm8, rs2 */
/* cmp #imm, rs2 : flags only; dst index 0 is a dummy (rx_cmp discards it) */
static bool trans_CMP_ir(DisasContext *ctx, arg_CMP_ir *a)
{
    rx_gen_op_irr(rx_cmp, 0, a->rs2, a->imm);
    return true;
}

/* cmp rs, rs2 */
/* cmp dsp[rs], rs2 */
static bool trans_CMP_mr(DisasContext *ctx, arg_CMP_mr *a)
{
    rx_gen_op_mr(rx_cmp, ctx, a->rd, a->rs, a->ld, a->mi);
    return true;
}

/* sub #imm4, rd */
static bool trans_SUB_ir(DisasContext *ctx, arg_SUB_ir *a)
{
    rx_gen_op_irr(rx_sub, a->rd, a->rd, a->imm);
    return true;
}

/* sub rs, rd */
/* sub dsp[rs], rd */
static bool trans_SUB_mr(DisasContext *ctx, arg_SUB_mr *a)
{
    rx_gen_op_mr(rx_sub, ctx, a->rd, a->rs, a->ld, a->mi);
    return true;
}

/* sub rs2, rs, rd : rd = rs2 - rs */
static bool trans_SUB_rrr(DisasContext *ctx, arg_SUB_rrr *a)
{
    rx_gen_op_rrr(rx_sub, a->rd, a->rs2, a->rs);
    return true;
}

/* sbb rs, rd */
static bool trans_SBB_rr(DisasContext *ctx, arg_SBB_rr *a)
{
    rx_gen_op_rrr(rx_sbb, a->rd, a->rd, a->rs);
    return true;
}

/* sbb dsp[rs], rd */
static bool trans_SBB_mr(DisasContext *ctx, arg_SBB_mr *a)
{
    /* mi only 2 (long memory operand is the only legal form) */
    if (a->mi != 2) {
        return false;
    }
    rx_gen_op_mr(rx_sbb, ctx, a->rd, a->rs, a->ld, a->mi);
    return true;
}
1161
/* ret = |arg1| via movcond; flags are not modified here */
static void rx_abs(TCGv ret, TCGv arg1)
{
    TCGv neg;
    TCGv zero;
    neg = tcg_temp_new();
    zero = tcg_const_i32(0);
    tcg_gen_neg_i32(neg, arg1);
    tcg_gen_movcond_i32(TCG_COND_LT, ret, arg1, zero, neg, arg1);
    tcg_temp_free(neg);
    tcg_temp_free(zero);
}

/* abs rd */
/* abs rs, rd */
static bool trans_ABS_rr(DisasContext *ctx, arg_ABS_rr *a)
{
    rx_gen_op_rr(rx_abs, a->rd, a->rs);
    return true;
}
1181
/* max #imm, rd : rd = signed max(rd, imm) */
static bool trans_MAX_ir(DisasContext *ctx, arg_MAX_ir *a)
{
    rx_gen_op_irr(tcg_gen_smax_i32, a->rd, a->rd, a->imm);
    return true;
}

/* max rs, rd */
/* max dsp[rs], rd */
static bool trans_MAX_mr(DisasContext *ctx, arg_MAX_mr *a)
{
    rx_gen_op_mr(tcg_gen_smax_i32, ctx, a->rd, a->rs, a->ld, a->mi);
    return true;
}

/* min #imm, rd : rd = signed min(rd, imm) */
static bool trans_MIN_ir(DisasContext *ctx, arg_MIN_ir *a)
{
    rx_gen_op_irr(tcg_gen_smin_i32, a->rd, a->rd, a->imm);
    return true;
}

/* min rs, rd */
/* min dsp[rs], rd */
static bool trans_MIN_mr(DisasContext *ctx, arg_MIN_mr *a)
{
    rx_gen_op_mr(tcg_gen_smin_i32, ctx, a->rd, a->rs, a->ld, a->mi);
    return true;
}
1211
/* mul #uimm4, rd */
/* mul #imm, rd : low 32 bits of the product; flags unchanged */
static bool trans_MUL_ir(DisasContext *ctx, arg_MUL_ir *a)
{
    rx_gen_op_irr(tcg_gen_mul_i32, a->rd, a->rd, a->imm);
    return true;
}

/* mul rs, rd */
/* mul dsp[rs], rd */
static bool trans_MUL_mr(DisasContext *ctx, arg_MUL_mr *a)
{
    rx_gen_op_mr(tcg_gen_mul_i32, ctx, a->rd, a->rs, a->ld, a->mi);
    return true;
}

/* mul rs, rs2, rd */
static bool trans_MUL_rrr(DisasContext *ctx, arg_MUL_rrr *a)
{
    rx_gen_op_rrr(tcg_gen_mul_i32, a->rd, a->rs, a->rs2);
    return true;
}
1234
/* emul #imm, rd */
static bool trans_EMUL_ir(DisasContext *ctx, arg_EMUL_ir *a)
{
    TCGv imm = tcg_const_i32(a->imm);
    /* The 64-bit signed product occupies rd (low) and rd+1 (high),
       so r15 is not a valid destination; log it but still generate
       code — (rd + 1) & 15 keeps the index in range. */
    if (a->rd > 14) {
        qemu_log_mask(LOG_GUEST_ERROR, "rd too large %d", a->rd);
    }
    tcg_gen_muls2_i32(cpu_regs[a->rd], cpu_regs[(a->rd + 1) & 15],
                      cpu_regs[a->rd], imm);
    tcg_temp_free(imm);
    return true;
}

/* emul rs, rd */
/* emul dsp[rs], rd */
static bool trans_EMUL_mr(DisasContext *ctx, arg_EMUL_mr *a)
{
    TCGv val, mem;
    /* Same rd:rd+1 destination pair restriction as trans_EMUL_ir. */
    if (a->rd > 14) {
        qemu_log_mask(LOG_GUEST_ERROR, "rd too large %d", a->rd);
    }
    mem = tcg_temp_new();
    val = rx_load_source(ctx, mem, a->ld, a->mi, a->rs);
    tcg_gen_muls2_i32(cpu_regs[a->rd], cpu_regs[(a->rd + 1) & 15],
                      cpu_regs[a->rd], val);
    tcg_temp_free(mem);
    return true;
}

/* emulu #imm, rd */
static bool trans_EMULU_ir(DisasContext *ctx, arg_EMULU_ir *a)
{
    TCGv imm = tcg_const_i32(a->imm);
    /* Unsigned variant; same rd:rd+1 pair restriction. */
    if (a->rd > 14) {
        qemu_log_mask(LOG_GUEST_ERROR, "rd too large %d", a->rd);
    }
    tcg_gen_mulu2_i32(cpu_regs[a->rd], cpu_regs[(a->rd + 1) & 15],
                      cpu_regs[a->rd], imm);
    tcg_temp_free(imm);
    return true;
}

/* emulu rs, rd */
/* emulu dsp[rs], rd */
static bool trans_EMULU_mr(DisasContext *ctx, arg_EMULU_mr *a)
{
    TCGv val, mem;
    if (a->rd > 14) {
        qemu_log_mask(LOG_GUEST_ERROR, "rd too large %d", a->rd);
    }
    mem = tcg_temp_new();
    val = rx_load_source(ctx, mem, a->ld, a->mi, a->rs);
    tcg_gen_mulu2_i32(cpu_regs[a->rd], cpu_regs[(a->rd + 1) & 15],
                      cpu_regs[a->rd], val);
    tcg_temp_free(mem);
    return true;
}

/* Signed division goes through a helper since it needs CPU state. */
static void rx_div(TCGv ret, TCGv arg1, TCGv arg2)
{
    gen_helper_div(ret, cpu_env, arg1, arg2);
}

/* Unsigned division; also routed through a helper. */
static void rx_divu(TCGv ret, TCGv arg1, TCGv arg2)
{
    gen_helper_divu(ret, cpu_env, arg1, arg2);
}

/* div #imm, rd */
static bool trans_DIV_ir(DisasContext *ctx, arg_DIV_ir *a)
{
    rx_gen_op_irr(rx_div, a->rd, a->rd, a->imm);
    return true;
}

/* div rs, rd */
/* div dsp[rs], rd */
static bool trans_DIV_mr(DisasContext *ctx, arg_DIV_mr *a)
{
    rx_gen_op_mr(rx_div, ctx, a->rd, a->rs, a->ld, a->mi);
    return true;
}

/* divu #imm, rd */
static bool trans_DIVU_ir(DisasContext *ctx, arg_DIVU_ir *a)
{
    rx_gen_op_irr(rx_divu, a->rd, a->rd, a->imm);
    return true;
}

/* divu rs, rd */
/* divu dsp[rs], rd */
static bool trans_DIVU_mr(DisasContext *ctx, arg_DIVU_mr *a)
{
    rx_gen_op_mr(rx_divu, ctx, a->rd, a->rs, a->ld, a->mi);
    return true;
}
1332
1333
1334 /* shll #imm:5, rd */
1335 /* shll #imm:5, rs2, rd */
1336 static bool trans_SHLL_irr(DisasContext *ctx, arg_SHLL_irr *a)
1337 {
1338 TCGv tmp;
1339 tmp = tcg_temp_new();
1340 if (a->imm) {
1341 tcg_gen_sari_i32(cpu_psw_c, cpu_regs[a->rs2], 32 - a->imm);
1342 tcg_gen_shli_i32(cpu_regs[a->rd], cpu_regs[a->rs2], a->imm);
1343 tcg_gen_setcondi_i32(TCG_COND_EQ, cpu_psw_o, cpu_psw_c, 0);
1344 tcg_gen_setcondi_i32(TCG_COND_EQ, tmp, cpu_psw_c, 0xffffffff);
1345 tcg_gen_or_i32(cpu_psw_o, cpu_psw_o, tmp);
1346 tcg_gen_setcondi_i32(TCG_COND_NE, cpu_psw_c, cpu_psw_c, 0);
1347 } else {
1348 tcg_gen_mov_i32(cpu_regs[a->rd], cpu_regs[a->rs2]);
1349 tcg_gen_movi_i32(cpu_psw_c, 0);
1350 tcg_gen_movi_i32(cpu_psw_o, 0);
1351 }
1352 tcg_gen_mov_i32(cpu_psw_z, cpu_regs[a->rd]);
1353 tcg_gen_mov_i32(cpu_psw_s, cpu_regs[a->rd]);
1354 return true;
1355 }
1356
/* shll rs, rd */
static bool trans_SHLL_rr(DisasContext *ctx, arg_SHLL_rr *a)
{
    TCGLabel *noshift, *done;
    TCGv count, tmp;

    noshift = gen_new_label();
    done = gen_new_label();
    /* if (cpu_regs[a->rs]) { */
    tcg_gen_brcondi_i32(TCG_COND_EQ, cpu_regs[a->rs], 0, noshift);
    /* count = 32 - (rs & 31): how far to shift right to recover the
       bits that the left shift pushes out. */
    count = tcg_const_i32(32);
    tmp = tcg_temp_new();
    tcg_gen_andi_i32(tmp, cpu_regs[a->rs], 31);
    tcg_gen_sub_i32(count, count, tmp);
    tcg_gen_sar_i32(cpu_psw_c, cpu_regs[a->rd], count);
    tcg_gen_shl_i32(cpu_regs[a->rd], cpu_regs[a->rd], tmp);
    /* O is set when the shifted-out bits are neither all 0 nor all 1. */
    tcg_gen_setcondi_i32(TCG_COND_EQ, cpu_psw_o, cpu_psw_c, 0);
    tcg_gen_setcondi_i32(TCG_COND_EQ, tmp, cpu_psw_c, 0xffffffff);
    tcg_gen_or_i32(cpu_psw_o, cpu_psw_o, tmp);
    tcg_gen_setcondi_i32(TCG_COND_NE, cpu_psw_c, cpu_psw_c, 0);
    tcg_gen_br(done);
    /* } else { */
    gen_set_label(noshift);
    tcg_gen_movi_i32(cpu_psw_c, 0);
    tcg_gen_movi_i32(cpu_psw_o, 0);
    /* } */
    gen_set_label(done);
    tcg_gen_mov_i32(cpu_psw_z, cpu_regs[a->rd]);
    tcg_gen_mov_i32(cpu_psw_s, cpu_regs[a->rd]);
    /* Temps are translation-time objects; freeing here is fine even
       though their uses sit on only one runtime path. */
    tcg_temp_free(count);
    tcg_temp_free(tmp);
    return true;
}
1390
/*
 * Right shift rs by imm into rd.  alith selects logical (0, shr) or
 * arithmetic (1, sar).  The shift is split into (imm - 1) + 1 steps so
 * that C can capture the last bit shifted out.
 */
static inline void shiftr_imm(uint32_t rd, uint32_t rs, uint32_t imm,
                              unsigned int alith)
{
    static void (* const gen_sXri[])(TCGv ret, TCGv arg1, int arg2) = {
        tcg_gen_shri_i32, tcg_gen_sari_i32,
    };
    tcg_debug_assert(alith < 2);
    if (imm) {
        gen_sXri[alith](cpu_regs[rd], cpu_regs[rs], imm - 1);
        /* C = last bit that will be shifted out. */
        tcg_gen_andi_i32(cpu_psw_c, cpu_regs[rd], 0x00000001);
        gen_sXri[alith](cpu_regs[rd], cpu_regs[rd], 1);
    } else {
        /* Zero count: move only, carry cleared. */
        tcg_gen_mov_i32(cpu_regs[rd], cpu_regs[rs]);
        tcg_gen_movi_i32(cpu_psw_c, 0);
    }
    tcg_gen_movi_i32(cpu_psw_o, 0);
    tcg_gen_mov_i32(cpu_psw_z, cpu_regs[rd]);
    tcg_gen_mov_i32(cpu_psw_s, cpu_regs[rd]);
}

/*
 * Right shift rd by the count in rs (mod 32), register form; same
 * split-shift trick as shiftr_imm to recover C.
 */
static inline void shiftr_reg(uint32_t rd, uint32_t rs, unsigned int alith)
{
    TCGLabel *noshift, *done;
    TCGv count;
    static void (* const gen_sXri[])(TCGv ret, TCGv arg1, int arg2) = {
        tcg_gen_shri_i32, tcg_gen_sari_i32,
    };
    static void (* const gen_sXr[])(TCGv ret, TCGv arg1, TCGv arg2) = {
        tcg_gen_shr_i32, tcg_gen_sar_i32,
    };
    tcg_debug_assert(alith < 2);
    noshift = gen_new_label();
    done = gen_new_label();
    count = tcg_temp_new();
    /* if (cpu_regs[rs]) { */
    tcg_gen_brcondi_i32(TCG_COND_EQ, cpu_regs[rs], 0, noshift);
    tcg_gen_andi_i32(count, cpu_regs[rs], 31);
    tcg_gen_subi_i32(count, count, 1);
    gen_sXr[alith](cpu_regs[rd], cpu_regs[rd], count);
    /* C = last bit to be shifted out. */
    tcg_gen_andi_i32(cpu_psw_c, cpu_regs[rd], 0x00000001);
    gen_sXri[alith](cpu_regs[rd], cpu_regs[rd], 1);
    tcg_gen_br(done);
    /* } else { */
    gen_set_label(noshift);
    tcg_gen_movi_i32(cpu_psw_c, 0);
    /* } */
    gen_set_label(done);
    tcg_gen_movi_i32(cpu_psw_o, 0);
    tcg_gen_mov_i32(cpu_psw_z, cpu_regs[rd]);
    tcg_gen_mov_i32(cpu_psw_s, cpu_regs[rd]);
    tcg_temp_free(count);
}

/* shar #imm:5, rd */
/* shar #imm:5, rs2, rd */
static bool trans_SHAR_irr(DisasContext *ctx, arg_SHAR_irr *a)
{
    shiftr_imm(a->rd, a->rs2, a->imm, 1);
    return true;
}

/* shar rs, rd */
static bool trans_SHAR_rr(DisasContext *ctx, arg_SHAR_rr *a)
{
    shiftr_reg(a->rd, a->rs, 1);
    return true;
}

/* shlr #imm:5, rd */
/* shlr #imm:5, rs2, rd */
static bool trans_SHLR_irr(DisasContext *ctx, arg_SHLR_irr *a)
{
    shiftr_imm(a->rd, a->rs2, a->imm, 0);
    return true;
}

/* shlr rs, rd */
static bool trans_SHLR_rr(DisasContext *ctx, arg_SHLR_rr *a)
{
    shiftr_reg(a->rd, a->rs, 0);
    return true;
}
1473
/* rolc rd */
/* Rotate left by one through the carry: old C enters bit 0, old bit 31
   becomes the new C. */
static bool trans_ROLC(DisasContext *ctx, arg_ROLC *a)
{
    TCGv tmp;
    tmp = tcg_temp_new();
    tcg_gen_shri_i32(tmp, cpu_regs[a->rd], 31);
    tcg_gen_shli_i32(cpu_regs[a->rd], cpu_regs[a->rd], 1);
    tcg_gen_or_i32(cpu_regs[a->rd], cpu_regs[a->rd], cpu_psw_c);
    tcg_gen_mov_i32(cpu_psw_c, tmp);
    tcg_gen_mov_i32(cpu_psw_z, cpu_regs[a->rd]);
    tcg_gen_mov_i32(cpu_psw_s, cpu_regs[a->rd]);
    tcg_temp_free(tmp);
    return true;
}
1488
1489 /* rorc rd */
1490 static bool trans_RORC(DisasContext *ctx, arg_RORC *a)
1491 {
1492 TCGv tmp;
1493 tmp = tcg_temp_new();
1494 tcg_gen_andi_i32(tmp, cpu_regs[a->rd], 0x00000001);
1495 tcg_gen_shri_i32(cpu_regs[a->rd], cpu_regs[a->rd], 1);
1496 tcg_gen_shli_i32(cpu_psw_c, cpu_psw_c, 31);
1497 tcg_gen_or_i32(cpu_regs[a->rd], cpu_regs[a->rd], cpu_psw_c);
1498 tcg_gen_mov_i32(cpu_psw_c, tmp);
1499 tcg_gen_mov_i32(cpu_psw_z, cpu_regs[a->rd]);
1500 tcg_gen_mov_i32(cpu_psw_s, cpu_regs[a->rd]);
1501 return true;
1502 }
1503
/* Rotate direction and operand-form selectors for rx_rot(). */
enum {ROTR = 0, ROTL = 1};
enum {ROT_IMM = 0, ROT_REG = 1};

/*
 * Rotate rd by src (immediate value or register index, per ir).
 * C receives the bit that wrapped around; Z/S track the result.
 */
static inline void rx_rot(int ir, int dir, int rd, int src)
{
    switch (dir) {
    case ROTL:
        if (ir == ROT_IMM) {
            tcg_gen_rotli_i32(cpu_regs[rd], cpu_regs[rd], src);
        } else {
            tcg_gen_rotl_i32(cpu_regs[rd], cpu_regs[rd], cpu_regs[src]);
        }
        /* Wrapped-around bit is now bit 0. */
        tcg_gen_andi_i32(cpu_psw_c, cpu_regs[rd], 0x00000001);
        break;
    case ROTR:
        if (ir == ROT_IMM) {
            tcg_gen_rotri_i32(cpu_regs[rd], cpu_regs[rd], src);
        } else {
            tcg_gen_rotr_i32(cpu_regs[rd], cpu_regs[rd], cpu_regs[src]);
        }
        /* Wrapped-around bit is now bit 31. */
        tcg_gen_shri_i32(cpu_psw_c, cpu_regs[rd], 31);
        break;
    }
    tcg_gen_mov_i32(cpu_psw_z, cpu_regs[rd]);
    tcg_gen_mov_i32(cpu_psw_s, cpu_regs[rd]);
}

/* rotl #imm, rd */
static bool trans_ROTL_ir(DisasContext *ctx, arg_ROTL_ir *a)
{
    rx_rot(ROT_IMM, ROTL, a->rd, a->imm);
    return true;
}

/* rotl rs, rd */
static bool trans_ROTL_rr(DisasContext *ctx, arg_ROTL_rr *a)
{
    rx_rot(ROT_REG, ROTL, a->rd, a->rs);
    return true;
}

/* rotr #imm, rd */
static bool trans_ROTR_ir(DisasContext *ctx, arg_ROTR_ir *a)
{
    rx_rot(ROT_IMM, ROTR, a->rd, a->imm);
    return true;
}

/* rotr rs, rd */
static bool trans_ROTR_rr(DisasContext *ctx, arg_ROTR_rr *a)
{
    rx_rot(ROT_REG, ROTR, a->rd, a->rs);
    return true;
}

/* revl rs, rd */
/* Full 32-bit byte swap. */
static bool trans_REVL(DisasContext *ctx, arg_REVL *a)
{
    tcg_gen_bswap32_i32(cpu_regs[a->rd], cpu_regs[a->rs]);
    return true;
}

/* revw rs, rd */
/* Byte swap within each 16-bit halfword independently. */
static bool trans_REVW(DisasContext *ctx, arg_REVW *a)
{
    TCGv tmp;
    tmp = tcg_temp_new();
    tcg_gen_andi_i32(tmp, cpu_regs[a->rs], 0x00ff00ff);
    tcg_gen_shli_i32(tmp, tmp, 8);
    tcg_gen_shri_i32(cpu_regs[a->rd], cpu_regs[a->rs], 8);
    tcg_gen_andi_i32(cpu_regs[a->rd], cpu_regs[a->rd], 0x00ff00ff);
    tcg_gen_or_i32(cpu_regs[a->rd], cpu_regs[a->rd], tmp);
    tcg_temp_free(tmp);
    return true;
}
1578
/*
 * Conditional branch helper.  cd is the RX condition code:
 * 0..13 are real PSW conditions, 14 is "always", 15 is "never".
 * dst is the displacement relative to the insn address (ctx->pc).
 */
static void rx_bcnd_main(DisasContext *ctx, int cd, int dst)
{
    DisasCompare dc;
    TCGLabel *t, *done;

    switch (cd) {
    case 0 ... 13:
        /* Taken path goes to ctx->pc + dst, not-taken falls through
           to the next insn; both exits use goto_tb chaining. */
        dc.temp = tcg_temp_new();
        psw_cond(&dc, cd);
        t = gen_new_label();
        done = gen_new_label();
        tcg_gen_brcondi_i32(dc.cond, dc.value, 0, t);
        gen_goto_tb(ctx, 0, ctx->base.pc_next);
        tcg_gen_br(done);
        gen_set_label(t);
        gen_goto_tb(ctx, 1, ctx->pc + dst);
        gen_set_label(done);
        tcg_temp_free(dc.temp);
        break;
    case 14:
        /* always true case */
        gen_goto_tb(ctx, 0, ctx->pc + dst);
        break;
    case 15:
        /* always false case */
        /* Nothing to do */
        break;
    }
}
1609
/* beq dsp:3 / bne dsp:3 */
/* beq dsp:8 / bne dsp:8 */
/* bc dsp:8 / bnc dsp:8 */
/* bgtu dsp:8 / bleu dsp:8 */
/* bpz dsp:8 / bn dsp:8 */
/* bge dsp:8 / blt dsp:8 */
/* bgt dsp:8 / ble dsp:8 */
/* bo dsp:8 / bno dsp:8 */
/* beq dsp:16 / bne dsp:16 */
static bool trans_BCnd(DisasContext *ctx, arg_BCnd *a)
{
    rx_bcnd_main(ctx, a->cd, a->dsp);
    return true;
}

/* bra dsp:3 */
/* bra dsp:8 */
/* bra dsp:16 */
/* bra dsp:24 */
static bool trans_BRA(DisasContext *ctx, arg_BRA *a)
{
    /* Unconditional: condition code 14 is "always". */
    rx_bcnd_main(ctx, 14, a->dsp);
    return true;
}

/* bra rs */
static bool trans_BRA_l(DisasContext *ctx, arg_BRA_l *a)
{
    /* Register-relative branch: pc = insn address + rd. */
    tcg_gen_addi_i32(cpu_pc, cpu_regs[a->rd], ctx->pc);
    ctx->base.is_jmp = DISAS_JUMP;
    return true;
}

/* Push the address of the next instruction (return address). */
static inline void rx_save_pc(DisasContext *ctx)
{
    TCGv pc = tcg_const_i32(ctx->base.pc_next);
    push(pc);
    tcg_temp_free(pc);
}

/* jmp rs */
static bool trans_JMP(DisasContext *ctx, arg_JMP *a)
{
    tcg_gen_mov_i32(cpu_pc, cpu_regs[a->rs]);
    ctx->base.is_jmp = DISAS_JUMP;
    return true;
}

/* jsr rs */
static bool trans_JSR(DisasContext *ctx, arg_JSR *a)
{
    rx_save_pc(ctx);
    tcg_gen_mov_i32(cpu_pc, cpu_regs[a->rs]);
    ctx->base.is_jmp = DISAS_JUMP;
    return true;
}

/* bsr dsp:16 */
/* bsr dsp:24 */
static bool trans_BSR(DisasContext *ctx, arg_BSR *a)
{
    rx_save_pc(ctx);
    rx_bcnd_main(ctx, 14, a->dsp);
    return true;
}

/* bsr rs */
static bool trans_BSR_l(DisasContext *ctx, arg_BSR_l *a)
{
    rx_save_pc(ctx);
    tcg_gen_addi_i32(cpu_pc, cpu_regs[a->rd], ctx->pc);
    ctx->base.is_jmp = DISAS_JUMP;
    return true;
}

/* rts */
static bool trans_RTS(DisasContext *ctx, arg_RTS *a)
{
    /* Pop the return address pushed by jsr/bsr. */
    pop(cpu_pc);
    ctx->base.is_jmp = DISAS_JUMP;
    return true;
}
1692
/* nop */
static bool trans_NOP(DisasContext *ctx, arg_NOP *a)
{
    return true;
}

/* scmpu */
/* String compare; implemented entirely in the helper. */
static bool trans_SCMPU(DisasContext *ctx, arg_SCMPU *a)
{
    gen_helper_scmpu(cpu_env);
    return true;
}

/* smovu */
static bool trans_SMOVU(DisasContext *ctx, arg_SMOVU *a)
{
    gen_helper_smovu(cpu_env);
    return true;
}

/* smovf */
static bool trans_SMOVF(DisasContext *ctx, arg_SMOVF *a)
{
    gen_helper_smovf(cpu_env);
    return true;
}

/* smovb */
static bool trans_SMOVB(DisasContext *ctx, arg_SMOVB *a)
{
    gen_helper_smovb(cpu_env);
    return true;
}

/* Invoke a string-op helper with the operand size from the insn. */
#define STRING(op) \
    do { \
        TCGv size = tcg_const_i32(a->sz); \
        gen_helper_##op(cpu_env, size); \
        tcg_temp_free(size); \
    } while (0)

/* suntil.<bwl> */
static bool trans_SUNTIL(DisasContext *ctx, arg_SUNTIL *a)
{
    STRING(suntil);
    return true;
}

/* swhile.<bwl> */
static bool trans_SWHILE(DisasContext *ctx, arg_SWHILE *a)
{
    STRING(swhile);
    return true;
}

/* sstr.<bwl> */
static bool trans_SSTR(DisasContext *ctx, arg_SSTR *a)
{
    STRING(sstr);
    return true;
}

/* rmpa.<bwl> */
static bool trans_RMPA(DisasContext *ctx, arg_RMPA *a)
{
    STRING(rmpa);
    return true;
}
1760
/*
 * ret = (upper halfword of rs, sign-extended) *
 *       (upper halfword of rs2, sign-extended), shifted left 16.
 * This is the fixed-point format the accumulator ops use.
 */
static void rx_mul64hi(TCGv_i64 ret, int rs, int rs2)
{
    TCGv_i64 tmp0, tmp1;
    tmp0 = tcg_temp_new_i64();
    tmp1 = tcg_temp_new_i64();
    tcg_gen_ext_i32_i64(tmp0, cpu_regs[rs]);
    tcg_gen_sari_i64(tmp0, tmp0, 16);
    tcg_gen_ext_i32_i64(tmp1, cpu_regs[rs2]);
    tcg_gen_sari_i64(tmp1, tmp1, 16);
    tcg_gen_mul_i64(ret, tmp0, tmp1);
    tcg_gen_shli_i64(ret, ret, 16);
    tcg_temp_free_i64(tmp0);
    tcg_temp_free_i64(tmp1);
}

/*
 * Same as rx_mul64hi but using the sign-extended LOWER halfword of
 * each source register.
 */
static void rx_mul64lo(TCGv_i64 ret, int rs, int rs2)
{
    TCGv_i64 tmp0, tmp1;
    tmp0 = tcg_temp_new_i64();
    tmp1 = tcg_temp_new_i64();
    tcg_gen_ext_i32_i64(tmp0, cpu_regs[rs]);
    tcg_gen_ext16s_i64(tmp0, tmp0);
    tcg_gen_ext_i32_i64(tmp1, cpu_regs[rs2]);
    tcg_gen_ext16s_i64(tmp1, tmp1);
    tcg_gen_mul_i64(ret, tmp0, tmp1);
    tcg_gen_shli_i64(ret, ret, 16);
    tcg_temp_free_i64(tmp0);
    tcg_temp_free_i64(tmp1);
}

/* mulhi rs,rs2 */
static bool trans_MULHI(DisasContext *ctx, arg_MULHI *a)
{
    rx_mul64hi(cpu_acc, a->rs, a->rs2);
    return true;
}

/* mullo rs,rs2 */
static bool trans_MULLO(DisasContext *ctx, arg_MULLO *a)
{
    rx_mul64lo(cpu_acc, a->rs, a->rs2);
    return true;
}

/* machi rs,rs2 */
/* Multiply-accumulate: ACC += product of upper halfwords. */
static bool trans_MACHI(DisasContext *ctx, arg_MACHI *a)
{
    TCGv_i64 tmp;
    tmp = tcg_temp_new_i64();
    rx_mul64hi(tmp, a->rs, a->rs2);
    tcg_gen_add_i64(cpu_acc, cpu_acc, tmp);
    tcg_temp_free_i64(tmp);
    return true;
}

/* maclo rs,rs2 */
/* Multiply-accumulate: ACC += product of lower halfwords. */
static bool trans_MACLO(DisasContext *ctx, arg_MACLO *a)
{
    TCGv_i64 tmp;
    tmp = tcg_temp_new_i64();
    rx_mul64lo(tmp, a->rs, a->rs2);
    tcg_gen_add_i64(cpu_acc, cpu_acc, tmp);
    tcg_temp_free_i64(tmp);
    return true;
}

/* mvfachi rd */
/* rd = ACC[63:32]. */
static bool trans_MVFACHI(DisasContext *ctx, arg_MVFACHI *a)
{
    tcg_gen_extrh_i64_i32(cpu_regs[a->rd], cpu_acc);
    return true;
}

/* mvfacmi rd */
/* rd = ACC[47:16] (the middle 32 bits). */
static bool trans_MVFACMI(DisasContext *ctx, arg_MVFACMI *a)
{
    TCGv_i64 rd64;
    rd64 = tcg_temp_new_i64();
    tcg_gen_extract_i64(rd64, cpu_acc, 16, 32);
    tcg_gen_extrl_i64_i32(cpu_regs[a->rd], rd64);
    tcg_temp_free_i64(rd64);
    return true;
}

/* mvtachi rs */
/* ACC[63:32] = rs; low half preserved. */
static bool trans_MVTACHI(DisasContext *ctx, arg_MVTACHI *a)
{
    TCGv_i64 rs64;
    rs64 = tcg_temp_new_i64();
    tcg_gen_extu_i32_i64(rs64, cpu_regs[a->rs]);
    tcg_gen_deposit_i64(cpu_acc, cpu_acc, rs64, 32, 32);
    tcg_temp_free_i64(rs64);
    return true;
}

/* mvtaclo rs */
/* ACC[31:0] = rs; high half preserved. */
static bool trans_MVTACLO(DisasContext *ctx, arg_MVTACLO *a)
{
    TCGv_i64 rs64;
    rs64 = tcg_temp_new_i64();
    tcg_gen_extu_i32_i64(rs64, cpu_regs[a->rs]);
    tcg_gen_deposit_i64(cpu_acc, cpu_acc, rs64, 0, 32);
    tcg_temp_free_i64(rs64);
    return true;
}

/* racw #imm */
/* Round ACC word; the encoded imm is 0-based, helper takes 1-based. */
static bool trans_RACW(DisasContext *ctx, arg_RACW *a)
{
    TCGv imm = tcg_const_i32(a->imm + 1);
    gen_helper_racw(cpu_env, imm);
    tcg_temp_free(imm);
    return true;
}
1875
/* sat rd */
/* Saturate rd after a signed overflow: when O is set (stored in the
   sign bit, hence the LT-zero test), replace rd with INT_MAX or
   INT_MIN according to S. */
static bool trans_SAT(DisasContext *ctx, arg_SAT *a)
{
    TCGv tmp, z;
    tmp = tcg_temp_new();
    z = tcg_const_i32(0);
    /* S == 1 -> 0xffffffff / S == 0 -> 0x00000000 */
    tcg_gen_sari_i32(tmp, cpu_psw_s, 31);
    /* S == 1 -> 0x7fffffff / S == 0 -> 0x80000000 */
    tcg_gen_xori_i32(tmp, tmp, 0x80000000);
    tcg_gen_movcond_i32(TCG_COND_LT, cpu_regs[a->rd],
                        cpu_psw_o, z, tmp, cpu_regs[a->rd]);
    tcg_temp_free(tmp);
    tcg_temp_free(z);
    return true;
}

/* satr */
/* Saturate the 64-bit accumulator registers; done in a helper. */
static bool trans_SATR(DisasContext *ctx, arg_SATR *a)
{
    gen_helper_satr(cpu_env);
    return true;
}
1899
/* Token-paste helper for the macro-generated translators below. */
#define cat3(a, b, c) a##b##c

/*
 * Two-operand FP ops (rd = rd <op> src) in immediate and
 * register/memory forms.  FP arithmetic is done in helpers since it
 * must update FPSW.  li(ctx, 0) fetches the literal from the insn.
 */
#define FOP(name, op) \
    static bool cat3(trans_, name, _ir)(DisasContext *ctx, \
                                        cat3(arg_, name, _ir) * a) \
    { \
        TCGv imm = tcg_const_i32(li(ctx, 0)); \
        gen_helper_##op(cpu_regs[a->rd], cpu_env, \
                        cpu_regs[a->rd], imm); \
        tcg_temp_free(imm); \
        return true; \
    } \
    static bool cat3(trans_, name, _mr)(DisasContext *ctx, \
                                        cat3(arg_, name, _mr) * a) \
    { \
        TCGv val, mem; \
        mem = tcg_temp_new(); \
        val = rx_load_source(ctx, mem, a->ld, MO_32, a->rs); \
        gen_helper_##op(cpu_regs[a->rd], cpu_env, \
                        cpu_regs[a->rd], val); \
        tcg_temp_free(mem); \
        return true; \
    }

/* One-operand FP conversions (rd = op(src)), register/memory form. */
#define FCONVOP(name, op) \
    static bool trans_##name(DisasContext *ctx, arg_##name * a) \
    { \
        TCGv val, mem; \
        mem = tcg_temp_new(); \
        val = rx_load_source(ctx, mem, a->ld, MO_32, a->rs); \
        gen_helper_##op(cpu_regs[a->rd], cpu_env, val); \
        tcg_temp_free(mem); \
        return true; \
    }

FOP(FADD, fadd)
FOP(FSUB, fsub)
FOP(FMUL, fmul)
FOP(FDIV, fdiv)

/* fcmp #imm, rd */
/* Comparison only sets flags; no register result. */
static bool trans_FCMP_ir(DisasContext *ctx, arg_FCMP_ir * a)
{
    TCGv imm = tcg_const_i32(li(ctx, 0));
    gen_helper_fcmp(cpu_env, cpu_regs[a->rd], imm);
    tcg_temp_free(imm);
    return true;
}

/* fcmp dsp[rs], rd */
/* fcmp rs, rd */
static bool trans_FCMP_mr(DisasContext *ctx, arg_FCMP_mr *a)
{
    TCGv val, mem;
    mem = tcg_temp_new();
    val = rx_load_source(ctx, mem, a->ld, MO_32, a->rs);
    gen_helper_fcmp(cpu_env, cpu_regs[a->rd], val);
    tcg_temp_free(mem);
    return true;
}

FCONVOP(FTOI, ftoi)
FCONVOP(ROUND, round)

/* itof rs, rd */
/* itof dsp[rs], rd */
/* Integer to float; unlike the FOPs, the memory size comes from mi. */
static bool trans_ITOF(DisasContext *ctx, arg_ITOF * a)
{
    TCGv val, mem;
    mem = tcg_temp_new();
    val = rx_load_source(ctx, mem, a->ld, a->mi, a->rs);
    gen_helper_itof(cpu_regs[a->rd], cpu_env, val);
    tcg_temp_free(mem);
    return true;
}
1974
/* Set a bit in a byte in memory: *mem |= mask (read-modify-write). */
static void rx_bsetm(TCGv mem, TCGv mask)
{
    TCGv val;
    val = tcg_temp_new();
    rx_gen_ld(MO_8, val, mem);
    tcg_gen_or_i32(val, val, mask);
    rx_gen_st(MO_8, val, mem);
    tcg_temp_free(val);
}

/* Clear a bit in a byte in memory: *mem &= ~mask. */
static void rx_bclrm(TCGv mem, TCGv mask)
{
    TCGv val;
    val = tcg_temp_new();
    rx_gen_ld(MO_8, val, mem);
    tcg_gen_andc_i32(val, val, mask);
    rx_gen_st(MO_8, val, mem);
    tcg_temp_free(val);
}

/* Test a bit in a byte in memory; sets C and Z, memory unchanged. */
static void rx_btstm(TCGv mem, TCGv mask)
{
    TCGv val;
    val = tcg_temp_new();
    rx_gen_ld(MO_8, val, mem);
    tcg_gen_and_i32(val, val, mask);
    tcg_gen_setcondi_i32(TCG_COND_NE, cpu_psw_c, val, 0);
    tcg_gen_mov_i32(cpu_psw_z, cpu_psw_c);
    tcg_temp_free(val);
}

/* Toggle a bit in a byte in memory: *mem ^= mask. */
static void rx_bnotm(TCGv mem, TCGv mask)
{
    TCGv val;
    val = tcg_temp_new();
    rx_gen_ld(MO_8, val, mem);
    tcg_gen_xor_i32(val, val, mask);
    rx_gen_st(MO_8, val, mem);
    tcg_temp_free(val);
}

/* Register forms of the same four bit operations. */
static void rx_bsetr(TCGv reg, TCGv mask)
{
    tcg_gen_or_i32(reg, reg, mask);
}

static void rx_bclrr(TCGv reg, TCGv mask)
{
    tcg_gen_andc_i32(reg, reg, mask);
}

static inline void rx_btstr(TCGv reg, TCGv mask)
{
    TCGv t0;
    t0 = tcg_temp_new();
    tcg_gen_and_i32(t0, reg, mask);
    tcg_gen_setcondi_i32(TCG_COND_NE, cpu_psw_c, t0, 0);
    tcg_gen_mov_i32(cpu_psw_z, cpu_psw_c);
    tcg_temp_free(t0);
}

static inline void rx_bnotr(TCGv reg, TCGv mask)
{
    tcg_gen_xor_i32(reg, reg, mask);
}

/*
 * Generate the four addressing forms of each bit instruction:
 *   _im: immediate bit number, memory operand (bit 0..7 of a byte)
 *   _ir: immediate bit number, register operand (bit 0..31)
 *   _rr: bit number in a register (mod 32), register operand
 *   _rm: bit number in a register (mod 8), memory operand
 */
#define BITOP(name, op) \
    static bool cat3(trans_, name, _im)(DisasContext *ctx, \
                                        cat3(arg_, name, _im) * a) \
    { \
        TCGv mask, mem, addr; \
        mem = tcg_temp_new(); \
        mask = tcg_const_i32(1 << a->imm); \
        addr = rx_index_addr(ctx, mem, a->ld, MO_8, a->rs); \
        cat3(rx_, op, m)(addr, mask); \
        tcg_temp_free(mask); \
        tcg_temp_free(mem); \
        return true; \
    } \
    static bool cat3(trans_, name, _ir)(DisasContext *ctx, \
                                        cat3(arg_, name, _ir) * a) \
    { \
        TCGv mask; \
        mask = tcg_const_i32(1 << a->imm); \
        cat3(rx_, op, r)(cpu_regs[a->rd], mask); \
        tcg_temp_free(mask); \
        return true; \
    } \
    static bool cat3(trans_, name, _rr)(DisasContext *ctx, \
                                        cat3(arg_, name, _rr) * a) \
    { \
        TCGv mask, b; \
        mask = tcg_const_i32(1); \
        b = tcg_temp_new(); \
        tcg_gen_andi_i32(b, cpu_regs[a->rs], 31); \
        tcg_gen_shl_i32(mask, mask, b); \
        cat3(rx_, op, r)(cpu_regs[a->rd], mask); \
        tcg_temp_free(mask); \
        tcg_temp_free(b); \
        return true; \
    } \
    static bool cat3(trans_, name, _rm)(DisasContext *ctx, \
                                        cat3(arg_, name, _rm) * a) \
    { \
        TCGv mask, mem, addr, b; \
        mask = tcg_const_i32(1); \
        b = tcg_temp_new(); \
        tcg_gen_andi_i32(b, cpu_regs[a->rd], 7); \
        tcg_gen_shl_i32(mask, mask, b); \
        mem = tcg_temp_new(); \
        addr = rx_index_addr(ctx, mem, a->ld, MO_8, a->rs); \
        cat3(rx_, op, m)(addr, mask); \
        tcg_temp_free(mem); \
        tcg_temp_free(mask); \
        tcg_temp_free(b); \
        return true; \
    }

BITOP(BSET, bset)
BITOP(BCLR, bclr)
BITOP(BTST, btst)
BITOP(BNOT, bnot)

/* Deposit the truth value of PSW condition `cond` into bit `pos` of val. */
static inline void bmcnd_op(TCGv val, TCGCond cond, int pos)
{
    TCGv bit;
    DisasCompare dc;
    dc.temp = tcg_temp_new();
    bit = tcg_temp_new();
    psw_cond(&dc, cond);
    tcg_gen_andi_i32(val, val, ~(1 << pos));
    tcg_gen_setcondi_i32(dc.cond, bit, dc.value, 0);
    tcg_gen_deposit_i32(val, val, bit, pos, 1);
    tcg_temp_free(bit);
    tcg_temp_free(dc.temp);
}

/* bmcnd #imm, dsp[rd] */
static bool trans_BMCnd_im(DisasContext *ctx, arg_BMCnd_im *a)
{
    TCGv val, mem, addr;
    val = tcg_temp_new();
    mem = tcg_temp_new();
    addr = rx_index_addr(ctx, mem, a->ld, MO_8, a->rd);
    rx_gen_ld(MO_8, val, addr);
    bmcnd_op(val, a->cd, a->imm);
    rx_gen_st(MO_8, val, addr);
    tcg_temp_free(val);
    tcg_temp_free(mem);
    return true;
}

/* bmcond #imm, rd */
static bool trans_BMCnd_ir(DisasContext *ctx, arg_BMCnd_ir *a)
{
    bmcnd_op(cpu_regs[a->rd], a->cd, a->imm);
    return true;
}
2133
/* PSW bit numbers as used by clrpsw/setpsw (insn `cb` field). */
enum {
    PSW_C = 0,
    PSW_Z = 1,
    PSW_S = 2,
    PSW_O = 3,
    PSW_I = 8,
    PSW_U = 9,
};
2142
2143 static inline void clrsetpsw(DisasContext *ctx, int cb, int val)
2144 {
2145 if (cb < 8) {
2146 switch (cb) {
2147 case PSW_C:
2148 tcg_gen_movi_i32(cpu_psw_c, val);
2149 break;
2150 case PSW_Z:
2151 tcg_gen_movi_i32(cpu_psw_z, val == 0);
2152 break;
2153 case PSW_S:
2154 tcg_gen_movi_i32(cpu_psw_s, val ? -1 : 0);
2155 break;
2156 case PSW_O:
2157 tcg_gen_movi_i32(cpu_psw_o, val << 31);
2158 break;
2159 default:
2160 qemu_log_mask(LOG_GUEST_ERROR, "Invalid distination %d", cb);
2161 break;
2162 }
2163 } else if (is_privileged(ctx, 0)) {
2164 switch (cb) {
2165 case PSW_I:
2166 tcg_gen_movi_i32(cpu_psw_i, val);
2167 ctx->base.is_jmp = DISAS_UPDATE;
2168 break;
2169 case PSW_U:
2170 tcg_gen_movi_i32(cpu_psw_u, val);
2171 break;
2172 default:
2173 qemu_log_mask(LOG_GUEST_ERROR, "Invalid distination %d", cb);
2174 break;
2175 }
2176 }
2177 }
2178
/* clrpsw psw */
static bool trans_CLRPSW(DisasContext *ctx, arg_CLRPSW *a)
{
    clrsetpsw(ctx, a->cb, 0);
    return true;
}

/* setpsw psw */
static bool trans_SETPSW(DisasContext *ctx, arg_SETPSW *a)
{
    clrsetpsw(ctx, a->cb, 1);
    return true;
}

/* mvtipl #imm */
/* Set the interrupt priority level; privileged, no-op otherwise. */
static bool trans_MVTIPL(DisasContext *ctx, arg_MVTIPL *a)
{
    if (is_privileged(ctx, 1)) {
        tcg_gen_movi_i32(cpu_psw_ipl, a->imm);
        /* IPL change can unmask interrupts: end the TB. */
        ctx->base.is_jmp = DISAS_UPDATE;
    }
    return true;
}

/* mvtc #imm, rd */
static bool trans_MVTC_i(DisasContext *ctx, arg_MVTC_i *a)
{
    TCGv imm;

    imm = tcg_const_i32(a->imm);
    move_to_cr(ctx, imm, a->cr);
    /* cr 0 is PSW; a privileged PSW write must end the TB. */
    if (a->cr == 0 && is_privileged(ctx, 0)) {
        ctx->base.is_jmp = DISAS_UPDATE;
    }
    tcg_temp_free(imm);
    return true;
}

/* mvtc rs, rd */
static bool trans_MVTC_r(DisasContext *ctx, arg_MVTC_r *a)
{
    move_to_cr(ctx, cpu_regs[a->rs], a->cr);
    if (a->cr == 0 && is_privileged(ctx, 0)) {
        ctx->base.is_jmp = DISAS_UPDATE;
    }
    return true;
}

/* mvfc rs, rd */
/* Read a control register; PC reads need the current insn address. */
static bool trans_MVFC(DisasContext *ctx, arg_MVFC *a)
{
    move_from_cr(cpu_regs[a->rd], a->cr, ctx->pc);
    return true;
}
2233
/* rtfi */
/* Return from fast interrupt: restore PC/PSW from BPC/BPSW. */
static bool trans_RTFI(DisasContext *ctx, arg_RTFI *a)
{
    TCGv psw;
    if (is_privileged(ctx, 1)) {
        psw = tcg_temp_new();
        tcg_gen_mov_i32(cpu_pc, cpu_bpc);
        tcg_gen_mov_i32(psw, cpu_bpsw);
        gen_helper_set_psw_rte(cpu_env, psw);
        ctx->base.is_jmp = DISAS_EXIT;
        tcg_temp_free(psw);
    }
    return true;
}

/* rte */
/* Return from exception: pop PC then PSW from the stack. */
static bool trans_RTE(DisasContext *ctx, arg_RTE *a)
{
    TCGv psw;
    if (is_privileged(ctx, 1)) {
        psw = tcg_temp_new();
        pop(cpu_pc);
        pop(psw);
        gen_helper_set_psw_rte(cpu_env, psw);
        ctx->base.is_jmp = DISAS_EXIT;
        tcg_temp_free(psw);
    }
    return true;
}

/* brk */
/* Unconditional trap; helper raises the exception, so no fallthrough. */
static bool trans_BRK(DisasContext *ctx, arg_BRK *a)
{
    tcg_gen_movi_i32(cpu_pc, ctx->base.pc_next);
    gen_helper_rxbrk(cpu_env);
    ctx->base.is_jmp = DISAS_NORETURN;
    return true;
}

/* int #imm */
/* Software interrupt to vector imm (0..255). */
static bool trans_INT(DisasContext *ctx, arg_INT *a)
{
    TCGv vec;

    tcg_debug_assert(a->imm < 0x100);
    vec = tcg_const_i32(a->imm);
    tcg_gen_movi_i32(cpu_pc, ctx->base.pc_next);
    gen_helper_rxint(cpu_env, vec);
    tcg_temp_free(vec);
    ctx->base.is_jmp = DISAS_NORETURN;
    return true;
}

/* wait */
/* Privileged; pc is advanced past the insn before halting. */
static bool trans_WAIT(DisasContext *ctx, arg_WAIT *a)
{
    if (is_privileged(ctx, 1)) {
        tcg_gen_addi_i32(cpu_pc, cpu_pc, 2);
        gen_helper_wait(cpu_env);
    }
    return true;
}
2296
/* TranslatorOps hook: stash the CPU env pointer for decode_load(). */
static void rx_tr_init_disas_context(DisasContextBase *dcbase, CPUState *cs)
{
    CPURXState *env = cs->env_ptr;
    DisasContext *ctx = container_of(dcbase, DisasContext, base);
    ctx->env = env;
}

/* TranslatorOps hook: nothing to do at TB start. */
static void rx_tr_tb_start(DisasContextBase *dcbase, CPUState *cs)
{
}

/* TranslatorOps hook: record the insn address for restore_state_to_opc. */
static void rx_tr_insn_start(DisasContextBase *dcbase, CPUState *cs)
{
    DisasContext *ctx = container_of(dcbase, DisasContext, base);

    tcg_gen_insn_start(ctx->base.pc_next);
}

static bool rx_tr_breakpoint_check(DisasContextBase *dcbase, CPUState *cs,
                                   const CPUBreakpoint *bp)
{
    DisasContext *ctx = container_of(dcbase, DisasContext, base);

    /* We have hit a breakpoint - make sure PC is up-to-date */
    tcg_gen_movi_i32(cpu_pc, ctx->base.pc_next);
    gen_helper_debug(cpu_env);
    ctx->base.is_jmp = DISAS_NORETURN;
    /* Advance by one so the TB has nonzero size. */
    ctx->base.pc_next += 1;
    return true;
}

/* TranslatorOps hook: decode and translate one instruction. */
static void rx_tr_translate_insn(DisasContextBase *dcbase, CPUState *cs)
{
    DisasContext *ctx = container_of(dcbase, DisasContext, base);
    uint32_t insn;

    ctx->pc = ctx->base.pc_next;
    insn = decode_load(ctx);
    if (!decode(ctx, insn)) {
        /* Not matched by the generated decoder: illegal instruction. */
        gen_helper_raise_illegal_instruction(cpu_env);
    }
}
2339
/* TranslatorOps hook: emit the TB's exit according to is_jmp. */
static void rx_tr_tb_stop(DisasContextBase *dcbase, CPUState *cs)
{
    DisasContext *ctx = container_of(dcbase, DisasContext, base);

    switch (ctx->base.is_jmp) {
    case DISAS_NEXT:
    case DISAS_TOO_MANY:
        gen_goto_tb(ctx, 0, dcbase->pc_next);
        break;
    case DISAS_JUMP:
        if (ctx->base.singlestep_enabled) {
            gen_helper_debug(cpu_env);
        } else {
            tcg_gen_lookup_and_goto_ptr();
        }
        break;
    case DISAS_UPDATE:
        tcg_gen_movi_i32(cpu_pc, ctx->base.pc_next);
        /* fall through */
    case DISAS_EXIT:
        tcg_gen_exit_tb(NULL, 0);
        break;
    case DISAS_NORETURN:
        break;
    default:
        g_assert_not_reached();
    }
}
2367
/* TranslatorOps hook: dump guest disassembly for -d in_asm. */
static void rx_tr_disas_log(const DisasContextBase *dcbase, CPUState *cs)
{
    qemu_log("IN:\n"); /* , lookup_symbol(dcbase->pc_first)); */
    log_target_disas(cs, dcbase->pc_first, dcbase->tb->size);
}

static const TranslatorOps rx_tr_ops = {
    .init_disas_context = rx_tr_init_disas_context,
    .tb_start           = rx_tr_tb_start,
    .insn_start         = rx_tr_insn_start,
    .breakpoint_check   = rx_tr_breakpoint_check,
    .translate_insn     = rx_tr_translate_insn,
    .tb_stop            = rx_tr_tb_stop,
    .disas_log          = rx_tr_disas_log,
};

/* Entry point from the generic translator loop. */
void gen_intermediate_code(CPUState *cs, TranslationBlock *tb, int max_insns)
{
    DisasContext dc;

    translator_loop(&rx_tr_ops, &dc.base, cs, tb, max_insns);
}

/* Restore env->pc from the data recorded by tcg_gen_insn_start(). */
void restore_state_to_opc(CPURXState *env, TranslationBlock *tb,
                          target_ulong *data)
{
    env->pc = data[0];
}

/* Bind a CPURXState field to a named TCG global. */
#define ALLOC_REGISTER(sym, name) \
    cpu_##sym = tcg_global_mem_new_i32(cpu_env, \
                                       offsetof(CPURXState, sym), name)

/* One-time allocation of the TCG globals used throughout this file. */
void rx_translate_init(void)
{
    static const char * const regnames[NUM_REGS] = {
        "R0", "R1", "R2", "R3", "R4", "R5", "R6", "R7",
        "R8", "R9", "R10", "R11", "R12", "R13", "R14", "R15"
    };
    int i;

    for (i = 0; i < NUM_REGS; i++) {
        cpu_regs[i] = tcg_global_mem_new_i32(cpu_env,
                                             offsetof(CPURXState, regs[i]),
                                             regnames[i]);
    }
    ALLOC_REGISTER(pc, "PC");
    ALLOC_REGISTER(psw_o, "PSW(O)");
    ALLOC_REGISTER(psw_s, "PSW(S)");
    ALLOC_REGISTER(psw_z, "PSW(Z)");
    ALLOC_REGISTER(psw_c, "PSW(C)");
    ALLOC_REGISTER(psw_u, "PSW(U)");
    ALLOC_REGISTER(psw_i, "PSW(I)");
    ALLOC_REGISTER(psw_pm, "PSW(PM)");
    ALLOC_REGISTER(psw_ipl, "PSW(IPL)");
    ALLOC_REGISTER(usp, "USP");
    ALLOC_REGISTER(fpsw, "FPSW");
    ALLOC_REGISTER(bpsw, "BPSW");
    ALLOC_REGISTER(bpc, "BPC");
    ALLOC_REGISTER(isp, "ISP");
    ALLOC_REGISTER(fintv, "FINTV");
    ALLOC_REGISTER(intb, "INTB");
    cpu_acc = tcg_global_mem_new_i64(cpu_env,
                                     offsetof(CPURXState, acc), "ACC");
}