/*
 * RX translation
 *
4 * Copyright (c) 2019 Yoshinori Sato
6 * This program is free software; you can redistribute it and/or modify it
7 * under the terms and conditions of the GNU General Public License,
8 * version 2 or later, as published by the Free Software Foundation.
10 * This program is distributed in the hope it will be useful, but WITHOUT
11 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
12 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
15 * You should have received a copy of the GNU General Public License along with
16 * this program. If not, see <http://www.gnu.org/licenses/>.
19 #include "qemu/osdep.h"
20 #include "qemu/bswap.h"
21 #include "qemu/qemu-print.h"
23 #include "exec/exec-all.h"
24 #include "tcg/tcg-op.h"
25 #include "exec/cpu_ldst.h"
26 #include "exec/helper-proto.h"
27 #include "exec/helper-gen.h"
28 #include "exec/translator.h"
29 #include "trace-tcg.h"
32 typedef struct DisasContext
{
33 DisasContextBase base
;
38 typedef struct DisasCompare
{
44 const char rx_crname
[][6] = {
45 "psw", "pc", "usp", "fpsw", "", "", "", "",
46 "bpsw", "bpc", "isp", "fintv", "intb", "", "", "",
49 /* Target-specific values for dc->base.is_jmp. */
50 #define DISAS_JUMP DISAS_TARGET_0
51 #define DISAS_UPDATE DISAS_TARGET_1
52 #define DISAS_EXIT DISAS_TARGET_2
54 /* global register indexes */
55 static TCGv cpu_regs
[16];
56 static TCGv cpu_psw_o
, cpu_psw_s
, cpu_psw_z
, cpu_psw_c
;
57 static TCGv cpu_psw_i
, cpu_psw_pm
, cpu_psw_u
, cpu_psw_ipl
;
58 static TCGv cpu_usp
, cpu_fpsw
, cpu_bpsw
, cpu_bpc
, cpu_isp
;
59 static TCGv cpu_fintv
, cpu_intb
, cpu_pc
;
60 static TCGv_i64 cpu_acc
;
62 #define cpu_sp cpu_regs[0]
64 #include "exec/gen-icount.h"
67 static uint32_t decode_load_bytes(DisasContext
*ctx
, uint32_t insn
,
71 uint8_t b
= cpu_ldub_code(ctx
->env
, ctx
->base
.pc_next
++);
72 insn
|= b
<< (32 - i
* 8);
77 static uint32_t li(DisasContext
*ctx
, int sz
)
80 CPURXState
*env
= ctx
->env
;
81 addr
= ctx
->base
.pc_next
;
83 tcg_debug_assert(sz
< 4);
86 ctx
->base
.pc_next
+= 1;
87 return cpu_ldsb_code(env
, addr
);
89 ctx
->base
.pc_next
+= 2;
90 return cpu_ldsw_code(env
, addr
);
92 ctx
->base
.pc_next
+= 3;
93 tmp
= cpu_ldsb_code(env
, addr
+ 2) << 16;
94 tmp
|= cpu_lduw_code(env
, addr
) & 0xffff;
97 ctx
->base
.pc_next
+= 4;
98 return cpu_ldl_code(env
, addr
);
103 static int bdsp_s(DisasContext
*ctx
, int d
)
119 /* Include the auto-generated decoder. */
120 #include "decode.inc.c"
122 void rx_cpu_dump_state(CPUState
*cs
, FILE *f
, int flags
)
124 RXCPU
*cpu
= RXCPU(cs
);
125 CPURXState
*env
= &cpu
->env
;
129 psw
= rx_cpu_pack_psw(env
);
130 qemu_fprintf(f
, "pc=0x%08x psw=0x%08x\n",
132 for (i
= 0; i
< 16; i
+= 4) {
133 qemu_fprintf(f
, "r%d=0x%08x r%d=0x%08x r%d=0x%08x r%d=0x%08x\n",
134 i
, env
->regs
[i
], i
+ 1, env
->regs
[i
+ 1],
135 i
+ 2, env
->regs
[i
+ 2], i
+ 3, env
->regs
[i
+ 3]);
139 static bool use_goto_tb(DisasContext
*dc
, target_ulong dest
)
141 if (unlikely(dc
->base
.singlestep_enabled
)) {
148 static void gen_goto_tb(DisasContext
*dc
, int n
, target_ulong dest
)
150 if (use_goto_tb(dc
, dest
)) {
152 tcg_gen_movi_i32(cpu_pc
, dest
);
153 tcg_gen_exit_tb(dc
->base
.tb
, n
);
155 tcg_gen_movi_i32(cpu_pc
, dest
);
156 if (dc
->base
.singlestep_enabled
) {
157 gen_helper_debug(cpu_env
);
159 tcg_gen_lookup_and_goto_ptr();
162 dc
->base
.is_jmp
= DISAS_NORETURN
;
165 /* generic load wrapper */
166 static inline void rx_gen_ld(unsigned int size
, TCGv reg
, TCGv mem
)
168 tcg_gen_qemu_ld_i32(reg
, mem
, 0, size
| MO_SIGN
| MO_TE
);
171 /* unsigned load wrapper */
172 static inline void rx_gen_ldu(unsigned int size
, TCGv reg
, TCGv mem
)
174 tcg_gen_qemu_ld_i32(reg
, mem
, 0, size
| MO_TE
);
177 /* generic store wrapper */
178 static inline void rx_gen_st(unsigned int size
, TCGv reg
, TCGv mem
)
180 tcg_gen_qemu_st_i32(reg
, mem
, 0, size
| MO_TE
);
184 static inline void rx_gen_regindex(DisasContext
*ctx
, TCGv mem
,
185 int size
, int ri
, int rb
)
187 tcg_gen_shli_i32(mem
, cpu_regs
[ri
], size
);
188 tcg_gen_add_i32(mem
, mem
, cpu_regs
[rb
]);
192 static inline TCGv
rx_index_addr(DisasContext
*ctx
, TCGv mem
,
193 int ld
, int size
, int reg
)
197 tcg_debug_assert(ld
< 3);
200 return cpu_regs
[reg
];
202 dsp
= cpu_ldub_code(ctx
->env
, ctx
->base
.pc_next
) << size
;
203 tcg_gen_addi_i32(mem
, cpu_regs
[reg
], dsp
);
204 ctx
->base
.pc_next
+= 1;
207 dsp
= cpu_lduw_code(ctx
->env
, ctx
->base
.pc_next
) << size
;
208 tcg_gen_addi_i32(mem
, cpu_regs
[reg
], dsp
);
209 ctx
->base
.pc_next
+= 2;
215 static inline MemOp
mi_to_mop(unsigned mi
)
217 static const MemOp mop
[5] = { MO_SB
, MO_SW
, MO_UL
, MO_UW
, MO_UB
};
218 tcg_debug_assert(mi
< 5);
222 /* load source operand */
223 static inline TCGv
rx_load_source(DisasContext
*ctx
, TCGv mem
,
224 int ld
, int mi
, int rs
)
230 addr
= rx_index_addr(ctx
, mem
, ld
, mop
& MO_SIZE
, rs
);
231 tcg_gen_qemu_ld_i32(mem
, addr
, 0, mop
| MO_TE
);
238 /* Processor mode check */
239 static int is_privileged(DisasContext
*ctx
, int is_exception
)
241 if (FIELD_EX32(ctx
->base
.tb
->flags
, PSW
, PM
)) {
243 gen_helper_raise_privilege_violation(cpu_env
);
251 /* generate QEMU condition */
252 static void psw_cond(DisasCompare
*dc
, uint32_t cond
)
254 tcg_debug_assert(cond
< 16);
257 dc
->cond
= TCG_COND_EQ
;
258 dc
->value
= cpu_psw_z
;
261 dc
->cond
= TCG_COND_NE
;
262 dc
->value
= cpu_psw_z
;
265 dc
->cond
= TCG_COND_NE
;
266 dc
->value
= cpu_psw_c
;
269 dc
->cond
= TCG_COND_EQ
;
270 dc
->value
= cpu_psw_c
;
272 case 4: /* gtu (C& ~Z) == 1 */
273 case 5: /* leu (C& ~Z) == 0 */
274 tcg_gen_setcondi_i32(TCG_COND_NE
, dc
->temp
, cpu_psw_z
, 0);
275 tcg_gen_and_i32(dc
->temp
, dc
->temp
, cpu_psw_c
);
276 dc
->cond
= (cond
== 4) ? TCG_COND_NE
: TCG_COND_EQ
;
277 dc
->value
= dc
->temp
;
279 case 6: /* pz (S == 0) */
280 dc
->cond
= TCG_COND_GE
;
281 dc
->value
= cpu_psw_s
;
283 case 7: /* n (S == 1) */
284 dc
->cond
= TCG_COND_LT
;
285 dc
->value
= cpu_psw_s
;
287 case 8: /* ge (S^O)==0 */
288 case 9: /* lt (S^O)==1 */
289 tcg_gen_xor_i32(dc
->temp
, cpu_psw_o
, cpu_psw_s
);
290 dc
->cond
= (cond
== 8) ? TCG_COND_GE
: TCG_COND_LT
;
291 dc
->value
= dc
->temp
;
293 case 10: /* gt ((S^O)|Z)==0 */
294 case 11: /* le ((S^O)|Z)==1 */
295 tcg_gen_xor_i32(dc
->temp
, cpu_psw_o
, cpu_psw_s
);
296 tcg_gen_sari_i32(dc
->temp
, dc
->temp
, 31);
297 tcg_gen_andc_i32(dc
->temp
, cpu_psw_z
, dc
->temp
);
298 dc
->cond
= (cond
== 10) ? TCG_COND_NE
: TCG_COND_EQ
;
299 dc
->value
= dc
->temp
;
302 dc
->cond
= TCG_COND_LT
;
303 dc
->value
= cpu_psw_o
;
306 dc
->cond
= TCG_COND_GE
;
307 dc
->value
= cpu_psw_o
;
309 case 14: /* always true */
310 dc
->cond
= TCG_COND_ALWAYS
;
311 dc
->value
= dc
->temp
;
313 case 15: /* always false */
314 dc
->cond
= TCG_COND_NEVER
;
315 dc
->value
= dc
->temp
;
320 static void move_from_cr(TCGv ret
, int cr
, uint32_t pc
)
322 TCGv z
= tcg_const_i32(0);
325 gen_helper_pack_psw(ret
, cpu_env
);
328 tcg_gen_movi_i32(ret
, pc
);
331 tcg_gen_movcond_i32(TCG_COND_NE
, ret
,
332 cpu_psw_u
, z
, cpu_sp
, cpu_usp
);
335 tcg_gen_mov_i32(ret
, cpu_fpsw
);
338 tcg_gen_mov_i32(ret
, cpu_bpsw
);
341 tcg_gen_mov_i32(ret
, cpu_bpc
);
344 tcg_gen_movcond_i32(TCG_COND_EQ
, ret
,
345 cpu_psw_u
, z
, cpu_sp
, cpu_isp
);
348 tcg_gen_mov_i32(ret
, cpu_fintv
);
351 tcg_gen_mov_i32(ret
, cpu_intb
);
354 qemu_log_mask(LOG_GUEST_ERROR
, "Unimplement control register %d", cr
);
355 /* Unimplement registers return 0 */
356 tcg_gen_movi_i32(ret
, 0);
362 static void move_to_cr(DisasContext
*ctx
, TCGv val
, int cr
)
365 if (cr
>= 8 && !is_privileged(ctx
, 0)) {
366 /* Some control registers can only be written in privileged mode. */
367 qemu_log_mask(LOG_GUEST_ERROR
,
368 "disallow control register write %s", rx_crname
[cr
]);
371 z
= tcg_const_i32(0);
374 gen_helper_set_psw(cpu_env
, val
);
376 /* case 1: to PC not supported */
378 tcg_gen_mov_i32(cpu_usp
, val
);
379 tcg_gen_movcond_i32(TCG_COND_NE
, cpu_sp
,
380 cpu_psw_u
, z
, cpu_usp
, cpu_sp
);
383 gen_helper_set_fpsw(cpu_env
, val
);
386 tcg_gen_mov_i32(cpu_bpsw
, val
);
389 tcg_gen_mov_i32(cpu_bpc
, val
);
392 tcg_gen_mov_i32(cpu_isp
, val
);
393 /* if PSW.U is 0, copy isp to r0 */
394 tcg_gen_movcond_i32(TCG_COND_EQ
, cpu_sp
,
395 cpu_psw_u
, z
, cpu_isp
, cpu_sp
);
398 tcg_gen_mov_i32(cpu_fintv
, val
);
401 tcg_gen_mov_i32(cpu_intb
, val
);
404 qemu_log_mask(LOG_GUEST_ERROR
,
405 "Unimplement control register %d", cr
);
411 static void push(TCGv val
)
413 tcg_gen_subi_i32(cpu_sp
, cpu_sp
, 4);
414 rx_gen_st(MO_32
, val
, cpu_sp
);
417 static void pop(TCGv ret
)
419 rx_gen_ld(MO_32
, ret
, cpu_sp
);
420 tcg_gen_addi_i32(cpu_sp
, cpu_sp
, 4);
423 /* mov.<bwl> rs,dsp5[rd] */
424 static bool trans_MOV_rm(DisasContext
*ctx
, arg_MOV_rm
*a
)
427 mem
= tcg_temp_new();
428 tcg_gen_addi_i32(mem
, cpu_regs
[a
->rd
], a
->dsp
<< a
->sz
);
429 rx_gen_st(a
->sz
, cpu_regs
[a
->rs
], mem
);
434 /* mov.<bwl> dsp5[rs],rd */
435 static bool trans_MOV_mr(DisasContext
*ctx
, arg_MOV_mr
*a
)
438 mem
= tcg_temp_new();
439 tcg_gen_addi_i32(mem
, cpu_regs
[a
->rs
], a
->dsp
<< a
->sz
);
440 rx_gen_ld(a
->sz
, cpu_regs
[a
->rd
], mem
);
445 /* mov.l #uimm4,rd */
446 /* mov.l #uimm8,rd */
448 static bool trans_MOV_ir(DisasContext
*ctx
, arg_MOV_ir
*a
)
450 tcg_gen_movi_i32(cpu_regs
[a
->rd
], a
->imm
);
454 /* mov.<bwl> #uimm8,dsp[rd] */
455 /* mov.<bwl> #imm, dsp[rd] */
456 static bool trans_MOV_im(DisasContext
*ctx
, arg_MOV_im
*a
)
459 imm
= tcg_const_i32(a
->imm
);
460 mem
= tcg_temp_new();
461 tcg_gen_addi_i32(mem
, cpu_regs
[a
->rd
], a
->dsp
<< a
->sz
);
462 rx_gen_st(a
->sz
, imm
, mem
);
468 /* mov.<bwl> [ri,rb],rd */
469 static bool trans_MOV_ar(DisasContext
*ctx
, arg_MOV_ar
*a
)
472 mem
= tcg_temp_new();
473 rx_gen_regindex(ctx
, mem
, a
->sz
, a
->ri
, a
->rb
);
474 rx_gen_ld(a
->sz
, cpu_regs
[a
->rd
], mem
);
479 /* mov.<bwl> rd,[ri,rb] */
480 static bool trans_MOV_ra(DisasContext
*ctx
, arg_MOV_ra
*a
)
483 mem
= tcg_temp_new();
484 rx_gen_regindex(ctx
, mem
, a
->sz
, a
->ri
, a
->rb
);
485 rx_gen_st(a
->sz
, cpu_regs
[a
->rs
], mem
);
490 /* mov.<bwl> dsp[rs],dsp[rd] */
491 /* mov.<bwl> rs,dsp[rd] */
492 /* mov.<bwl> dsp[rs],rd */
493 /* mov.<bwl> rs,rd */
494 static bool trans_MOV_mm(DisasContext
*ctx
, arg_MOV_mm
*a
)
496 static void (* const mov
[])(TCGv ret
, TCGv arg
) = {
497 tcg_gen_ext8s_i32
, tcg_gen_ext16s_i32
, tcg_gen_mov_i32
,
500 if (a
->lds
== 3 && a
->ldd
== 3) {
501 /* mov.<bwl> rs,rd */
502 mov
[a
->sz
](cpu_regs
[a
->rd
], cpu_regs
[a
->rs
]);
506 mem
= tcg_temp_new();
508 /* mov.<bwl> rs,dsp[rd] */
509 addr
= rx_index_addr(ctx
, mem
, a
->ldd
, a
->sz
, a
->rs
);
510 rx_gen_st(a
->sz
, cpu_regs
[a
->rd
], addr
);
511 } else if (a
->ldd
== 3) {
512 /* mov.<bwl> dsp[rs],rd */
513 addr
= rx_index_addr(ctx
, mem
, a
->lds
, a
->sz
, a
->rs
);
514 rx_gen_ld(a
->sz
, cpu_regs
[a
->rd
], addr
);
516 /* mov.<bwl> dsp[rs],dsp[rd] */
517 tmp
= tcg_temp_new();
518 addr
= rx_index_addr(ctx
, mem
, a
->lds
, a
->sz
, a
->rs
);
519 rx_gen_ld(a
->sz
, tmp
, addr
);
520 addr
= rx_index_addr(ctx
, mem
, a
->ldd
, a
->sz
, a
->rd
);
521 rx_gen_st(a
->sz
, tmp
, addr
);
528 /* mov.<bwl> rs,[rd+] */
529 /* mov.<bwl> rs,[-rd] */
530 static bool trans_MOV_rp(DisasContext
*ctx
, arg_MOV_rp
*a
)
533 val
= tcg_temp_new();
534 tcg_gen_mov_i32(val
, cpu_regs
[a
->rs
]);
536 tcg_gen_subi_i32(cpu_regs
[a
->rd
], cpu_regs
[a
->rd
], 1 << a
->sz
);
538 rx_gen_st(a
->sz
, val
, cpu_regs
[a
->rd
]);
540 tcg_gen_addi_i32(cpu_regs
[a
->rd
], cpu_regs
[a
->rd
], 1 << a
->sz
);
546 /* mov.<bwl> [rd+],rs */
547 /* mov.<bwl> [-rd],rs */
548 static bool trans_MOV_pr(DisasContext
*ctx
, arg_MOV_pr
*a
)
551 val
= tcg_temp_new();
553 tcg_gen_subi_i32(cpu_regs
[a
->rd
], cpu_regs
[a
->rd
], 1 << a
->sz
);
555 rx_gen_ld(a
->sz
, val
, cpu_regs
[a
->rd
]);
557 tcg_gen_addi_i32(cpu_regs
[a
->rd
], cpu_regs
[a
->rd
], 1 << a
->sz
);
559 tcg_gen_mov_i32(cpu_regs
[a
->rs
], val
);
564 /* movu.<bw> dsp5[rs],rd */
565 /* movu.<bw> dsp[rs],rd */
566 static bool trans_MOVU_mr(DisasContext
*ctx
, arg_MOVU_mr
*a
)
569 mem
= tcg_temp_new();
570 tcg_gen_addi_i32(mem
, cpu_regs
[a
->rs
], a
->dsp
<< a
->sz
);
571 rx_gen_ldu(a
->sz
, cpu_regs
[a
->rd
], mem
);
576 /* movu.<bw> rs,rd */
577 static bool trans_MOVU_rr(DisasContext
*ctx
, arg_MOVU_rr
*a
)
579 static void (* const ext
[])(TCGv ret
, TCGv arg
) = {
580 tcg_gen_ext8u_i32
, tcg_gen_ext16u_i32
,
582 ext
[a
->sz
](cpu_regs
[a
->rd
], cpu_regs
[a
->rs
]);
586 /* movu.<bw> [ri,rb],rd */
587 static bool trans_MOVU_ar(DisasContext
*ctx
, arg_MOVU_ar
*a
)
590 mem
= tcg_temp_new();
591 rx_gen_regindex(ctx
, mem
, a
->sz
, a
->ri
, a
->rb
);
592 rx_gen_ldu(a
->sz
, cpu_regs
[a
->rd
], mem
);
597 /* movu.<bw> [rd+],rs */
598 /* mov.<bw> [-rd],rs */
599 static bool trans_MOVU_pr(DisasContext
*ctx
, arg_MOVU_pr
*a
)
602 val
= tcg_temp_new();
604 tcg_gen_subi_i32(cpu_regs
[a
->rd
], cpu_regs
[a
->rd
], 1 << a
->sz
);
606 rx_gen_ldu(a
->sz
, val
, cpu_regs
[a
->rd
]);
608 tcg_gen_addi_i32(cpu_regs
[a
->rd
], cpu_regs
[a
->rd
], 1 << a
->sz
);
610 tcg_gen_mov_i32(cpu_regs
[a
->rs
], val
);
617 static bool trans_POP(DisasContext
*ctx
, arg_POP
*a
)
619 /* mov.l [r0+], rd */
625 trans_MOV_pr(ctx
, &mov_a
);
630 static bool trans_POPC(DisasContext
*ctx
, arg_POPC
*a
)
633 val
= tcg_temp_new();
635 move_to_cr(ctx
, val
, a
->cr
);
636 if (a
->cr
== 0 && is_privileged(ctx
, 0)) {
637 /* PSW.I may be updated here. exit TB. */
638 ctx
->base
.is_jmp
= DISAS_UPDATE
;
645 static bool trans_POPM(DisasContext
*ctx
, arg_POPM
*a
)
648 if (a
->rd
== 0 || a
->rd
>= a
->rd2
) {
649 qemu_log_mask(LOG_GUEST_ERROR
,
650 "Invalid register ranges r%d-r%d", a
->rd
, a
->rd2
);
653 while (r
<= a
->rd2
&& r
< 16) {
661 static bool trans_PUSH_r(DisasContext
*ctx
, arg_PUSH_r
*a
)
664 val
= tcg_temp_new();
665 tcg_gen_mov_i32(val
, cpu_regs
[a
->rs
]);
666 tcg_gen_subi_i32(cpu_sp
, cpu_sp
, 4);
667 rx_gen_st(a
->sz
, val
, cpu_sp
);
672 /* push.<bwl> dsp[rs] */
673 static bool trans_PUSH_m(DisasContext
*ctx
, arg_PUSH_m
*a
)
676 mem
= tcg_temp_new();
677 val
= tcg_temp_new();
678 addr
= rx_index_addr(ctx
, mem
, a
->ld
, a
->sz
, a
->rs
);
679 rx_gen_ld(a
->sz
, val
, addr
);
680 tcg_gen_subi_i32(cpu_sp
, cpu_sp
, 4);
681 rx_gen_st(a
->sz
, val
, cpu_sp
);
688 static bool trans_PUSHC(DisasContext
*ctx
, arg_PUSHC
*a
)
691 val
= tcg_temp_new();
692 move_from_cr(val
, a
->cr
, ctx
->pc
);
699 static bool trans_PUSHM(DisasContext
*ctx
, arg_PUSHM
*a
)
703 if (a
->rs
== 0 || a
->rs
>= a
->rs2
) {
704 qemu_log_mask(LOG_GUEST_ERROR
,
705 "Invalid register ranges r%d-r%d", a
->rs
, a
->rs2
);
708 while (r
>= a
->rs
&& r
>= 0) {
715 static bool trans_XCHG_rr(DisasContext
*ctx
, arg_XCHG_rr
*a
)
718 tmp
= tcg_temp_new();
719 tcg_gen_mov_i32(tmp
, cpu_regs
[a
->rs
]);
720 tcg_gen_mov_i32(cpu_regs
[a
->rs
], cpu_regs
[a
->rd
]);
721 tcg_gen_mov_i32(cpu_regs
[a
->rd
], tmp
);
726 /* xchg dsp[rs].<mi>,rd */
727 static bool trans_XCHG_mr(DisasContext
*ctx
, arg_XCHG_mr
*a
)
730 mem
= tcg_temp_new();
732 case 0: /* dsp[rs].b */
733 case 1: /* dsp[rs].w */
734 case 2: /* dsp[rs].l */
735 addr
= rx_index_addr(ctx
, mem
, a
->ld
, a
->mi
, a
->rs
);
737 case 3: /* dsp[rs].uw */
738 case 4: /* dsp[rs].ub */
739 addr
= rx_index_addr(ctx
, mem
, a
->ld
, 4 - a
->mi
, a
->rs
);
742 g_assert_not_reached();
744 tcg_gen_atomic_xchg_i32(cpu_regs
[a
->rd
], addr
, cpu_regs
[a
->rd
],
745 0, mi_to_mop(a
->mi
));
750 static inline void stcond(TCGCond cond
, int rd
, int imm
)
754 z
= tcg_const_i32(0);
755 _imm
= tcg_const_i32(imm
);
756 tcg_gen_movcond_i32(cond
, cpu_regs
[rd
], cpu_psw_z
, z
,
763 static bool trans_STZ(DisasContext
*ctx
, arg_STZ
*a
)
765 stcond(TCG_COND_EQ
, a
->rd
, a
->imm
);
770 static bool trans_STNZ(DisasContext
*ctx
, arg_STNZ
*a
)
772 stcond(TCG_COND_NE
, a
->rd
, a
->imm
);
777 /* sccnd.<bwl> dsp:[rd] */
778 static bool trans_SCCnd(DisasContext
*ctx
, arg_SCCnd
*a
)
782 dc
.temp
= tcg_temp_new();
783 psw_cond(&dc
, a
->cd
);
785 val
= tcg_temp_new();
786 mem
= tcg_temp_new();
787 tcg_gen_setcondi_i32(dc
.cond
, val
, dc
.value
, 0);
788 addr
= rx_index_addr(ctx
, mem
, a
->sz
, a
->ld
, a
->rd
);
789 rx_gen_st(a
->sz
, val
, addr
);
793 tcg_gen_setcondi_i32(dc
.cond
, cpu_regs
[a
->rd
], dc
.value
, 0);
795 tcg_temp_free(dc
.temp
);
800 static bool trans_RTSD_i(DisasContext
*ctx
, arg_RTSD_i
*a
)
802 tcg_gen_addi_i32(cpu_sp
, cpu_sp
, a
->imm
<< 2);
804 ctx
->base
.is_jmp
= DISAS_JUMP
;
808 /* rtsd #imm, rd-rd2 */
809 static bool trans_RTSD_irr(DisasContext
*ctx
, arg_RTSD_irr
*a
)
814 if (a
->rd2
>= a
->rd
) {
815 adj
= a
->imm
- (a
->rd2
- a
->rd
+ 1);
817 adj
= a
->imm
- (15 - a
->rd
+ 1);
820 tcg_gen_addi_i32(cpu_sp
, cpu_sp
, adj
<< 2);
822 while (dst
<= a
->rd2
&& dst
< 16) {
823 pop(cpu_regs
[dst
++]);
826 ctx
->base
.is_jmp
= DISAS_JUMP
;
830 typedef void (*op2fn
)(TCGv ret
, TCGv arg1
);
831 typedef void (*op3fn
)(TCGv ret
, TCGv arg1
, TCGv arg2
);
833 static inline void rx_gen_op_rr(op2fn opr
, int dst
, int src
)
835 opr(cpu_regs
[dst
], cpu_regs
[src
]);
838 static inline void rx_gen_op_rrr(op3fn opr
, int dst
, int src
, int src2
)
840 opr(cpu_regs
[dst
], cpu_regs
[src
], cpu_regs
[src2
]);
843 static inline void rx_gen_op_irr(op3fn opr
, int dst
, int src
, uint32_t src2
)
845 TCGv imm
= tcg_const_i32(src2
);
846 opr(cpu_regs
[dst
], cpu_regs
[src
], imm
);
850 static inline void rx_gen_op_mr(op3fn opr
, DisasContext
*ctx
,
851 int dst
, int src
, int ld
, int mi
)
854 mem
= tcg_temp_new();
855 val
= rx_load_source(ctx
, mem
, ld
, mi
, src
);
856 opr(cpu_regs
[dst
], cpu_regs
[dst
], val
);
860 static void rx_and(TCGv ret
, TCGv arg1
, TCGv arg2
)
862 tcg_gen_and_i32(cpu_psw_s
, arg1
, arg2
);
863 tcg_gen_mov_i32(cpu_psw_z
, cpu_psw_s
);
864 tcg_gen_mov_i32(ret
, cpu_psw_s
);
867 /* and #uimm:4, rd */
869 static bool trans_AND_ir(DisasContext
*ctx
, arg_AND_ir
*a
)
871 rx_gen_op_irr(rx_and
, a
->rd
, a
->rd
, a
->imm
);
875 /* and dsp[rs], rd */
877 static bool trans_AND_mr(DisasContext
*ctx
, arg_AND_mr
*a
)
879 rx_gen_op_mr(rx_and
, ctx
, a
->rd
, a
->rs
, a
->ld
, a
->mi
);
884 static bool trans_AND_rrr(DisasContext
*ctx
, arg_AND_rrr
*a
)
886 rx_gen_op_rrr(rx_and
, a
->rd
, a
->rs
, a
->rs2
);
890 static void rx_or(TCGv ret
, TCGv arg1
, TCGv arg2
)
892 tcg_gen_or_i32(cpu_psw_s
, arg1
, arg2
);
893 tcg_gen_mov_i32(cpu_psw_z
, cpu_psw_s
);
894 tcg_gen_mov_i32(ret
, cpu_psw_s
);
899 static bool trans_OR_ir(DisasContext
*ctx
, arg_OR_ir
*a
)
901 rx_gen_op_irr(rx_or
, a
->rd
, a
->rd
, a
->imm
);
907 static bool trans_OR_mr(DisasContext
*ctx
, arg_OR_mr
*a
)
909 rx_gen_op_mr(rx_or
, ctx
, a
->rd
, a
->rs
, a
->ld
, a
->mi
);
914 static bool trans_OR_rrr(DisasContext
*ctx
, arg_OR_rrr
*a
)
916 rx_gen_op_rrr(rx_or
, a
->rd
, a
->rs
, a
->rs2
);
920 static void rx_xor(TCGv ret
, TCGv arg1
, TCGv arg2
)
922 tcg_gen_xor_i32(cpu_psw_s
, arg1
, arg2
);
923 tcg_gen_mov_i32(cpu_psw_z
, cpu_psw_s
);
924 tcg_gen_mov_i32(ret
, cpu_psw_s
);
928 static bool trans_XOR_ir(DisasContext
*ctx
, arg_XOR_ir
*a
)
930 rx_gen_op_irr(rx_xor
, a
->rd
, a
->rd
, a
->imm
);
934 /* xor dsp[rs], rd */
936 static bool trans_XOR_mr(DisasContext
*ctx
, arg_XOR_mr
*a
)
938 rx_gen_op_mr(rx_xor
, ctx
, a
->rd
, a
->rs
, a
->ld
, a
->mi
);
942 static void rx_tst(TCGv ret
, TCGv arg1
, TCGv arg2
)
944 tcg_gen_and_i32(cpu_psw_s
, arg1
, arg2
);
945 tcg_gen_mov_i32(cpu_psw_z
, cpu_psw_s
);
949 static bool trans_TST_ir(DisasContext
*ctx
, arg_TST_ir
*a
)
951 rx_gen_op_irr(rx_tst
, a
->rd
, a
->rd
, a
->imm
);
955 /* tst dsp[rs], rd */
957 static bool trans_TST_mr(DisasContext
*ctx
, arg_TST_mr
*a
)
959 rx_gen_op_mr(rx_tst
, ctx
, a
->rd
, a
->rs
, a
->ld
, a
->mi
);
963 static void rx_not(TCGv ret
, TCGv arg1
)
965 tcg_gen_not_i32(ret
, arg1
);
966 tcg_gen_mov_i32(cpu_psw_z
, ret
);
967 tcg_gen_mov_i32(cpu_psw_s
, ret
);
972 static bool trans_NOT_rr(DisasContext
*ctx
, arg_NOT_rr
*a
)
974 rx_gen_op_rr(rx_not
, a
->rd
, a
->rs
);
978 static void rx_neg(TCGv ret
, TCGv arg1
)
980 tcg_gen_setcondi_i32(TCG_COND_EQ
, cpu_psw_o
, arg1
, 0x80000000);
981 tcg_gen_neg_i32(ret
, arg1
);
982 tcg_gen_setcondi_i32(TCG_COND_EQ
, cpu_psw_c
, ret
, 0);
983 tcg_gen_mov_i32(cpu_psw_z
, ret
);
984 tcg_gen_mov_i32(cpu_psw_s
, ret
);
990 static bool trans_NEG_rr(DisasContext
*ctx
, arg_NEG_rr
*a
)
992 rx_gen_op_rr(rx_neg
, a
->rd
, a
->rs
);
996 /* ret = arg1 + arg2 + psw_c */
997 static void rx_adc(TCGv ret
, TCGv arg1
, TCGv arg2
)
1000 z
= tcg_const_i32(0);
1001 tcg_gen_add2_i32(cpu_psw_s
, cpu_psw_c
, arg1
, z
, cpu_psw_c
, z
);
1002 tcg_gen_add2_i32(cpu_psw_s
, cpu_psw_c
, cpu_psw_s
, cpu_psw_c
, arg2
, z
);
1003 tcg_gen_mov_i32(cpu_psw_z
, cpu_psw_s
);
1004 tcg_gen_xor_i32(cpu_psw_o
, cpu_psw_s
, arg1
);
1005 tcg_gen_xor_i32(z
, arg1
, arg2
);
1006 tcg_gen_andc_i32(cpu_psw_o
, cpu_psw_o
, z
);
1007 tcg_gen_mov_i32(ret
, cpu_psw_s
);
1012 static bool trans_ADC_ir(DisasContext
*ctx
, arg_ADC_ir
*a
)
1014 rx_gen_op_irr(rx_adc
, a
->rd
, a
->rd
, a
->imm
);
1019 static bool trans_ADC_rr(DisasContext
*ctx
, arg_ADC_rr
*a
)
1021 rx_gen_op_rrr(rx_adc
, a
->rd
, a
->rd
, a
->rs
);
1025 /* adc dsp[rs], rd */
1026 static bool trans_ADC_mr(DisasContext
*ctx
, arg_ADC_mr
*a
)
1032 rx_gen_op_mr(rx_adc
, ctx
, a
->rd
, a
->rs
, a
->ld
, a
->mi
);
1036 /* ret = arg1 + arg2 */
1037 static void rx_add(TCGv ret
, TCGv arg1
, TCGv arg2
)
1040 z
= tcg_const_i32(0);
1041 tcg_gen_add2_i32(cpu_psw_s
, cpu_psw_c
, arg1
, z
, arg2
, z
);
1042 tcg_gen_mov_i32(cpu_psw_z
, cpu_psw_s
);
1043 tcg_gen_xor_i32(cpu_psw_o
, cpu_psw_s
, arg1
);
1044 tcg_gen_xor_i32(z
, arg1
, arg2
);
1045 tcg_gen_andc_i32(cpu_psw_o
, cpu_psw_o
, z
);
1046 tcg_gen_mov_i32(ret
, cpu_psw_s
);
1050 /* add #uimm4, rd */
1051 /* add #imm, rs, rd */
1052 static bool trans_ADD_irr(DisasContext
*ctx
, arg_ADD_irr
*a
)
1054 rx_gen_op_irr(rx_add
, a
->rd
, a
->rs2
, a
->imm
);
1059 /* add dsp[rs], rd */
1060 static bool trans_ADD_mr(DisasContext
*ctx
, arg_ADD_mr
*a
)
1062 rx_gen_op_mr(rx_add
, ctx
, a
->rd
, a
->rs
, a
->ld
, a
->mi
);
1066 /* add rs, rs2, rd */
1067 static bool trans_ADD_rrr(DisasContext
*ctx
, arg_ADD_rrr
*a
)
1069 rx_gen_op_rrr(rx_add
, a
->rd
, a
->rs
, a
->rs2
);
1073 /* ret = arg1 - arg2 */
1074 static void rx_sub(TCGv ret
, TCGv arg1
, TCGv arg2
)
1077 tcg_gen_sub_i32(cpu_psw_s
, arg1
, arg2
);
1078 tcg_gen_mov_i32(cpu_psw_z
, cpu_psw_s
);
1079 tcg_gen_setcond_i32(TCG_COND_GEU
, cpu_psw_c
, arg1
, arg2
);
1080 tcg_gen_xor_i32(cpu_psw_o
, cpu_psw_s
, arg1
);
1081 temp
= tcg_temp_new_i32();
1082 tcg_gen_xor_i32(temp
, arg1
, arg2
);
1083 tcg_gen_and_i32(cpu_psw_o
, cpu_psw_o
, temp
);
1084 tcg_temp_free_i32(temp
);
1085 /* CMP not requred return */
1087 tcg_gen_mov_i32(ret
, cpu_psw_s
);
1090 static void rx_cmp(TCGv dummy
, TCGv arg1
, TCGv arg2
)
1092 rx_sub(NULL
, arg1
, arg2
);
1094 /* ret = arg1 - arg2 - !psw_c */
1095 /* -> ret = arg1 + ~arg2 + psw_c */
1096 static void rx_sbb(TCGv ret
, TCGv arg1
, TCGv arg2
)
1099 temp
= tcg_temp_new();
1100 tcg_gen_not_i32(temp
, arg2
);
1101 rx_adc(ret
, arg1
, temp
);
1102 tcg_temp_free(temp
);
1105 /* cmp #imm4, rs2 */
1106 /* cmp #imm8, rs2 */
1108 static bool trans_CMP_ir(DisasContext
*ctx
, arg_CMP_ir
*a
)
1110 rx_gen_op_irr(rx_cmp
, 0, a
->rs2
, a
->imm
);
1115 /* cmp dsp[rs], rs2 */
1116 static bool trans_CMP_mr(DisasContext
*ctx
, arg_CMP_mr
*a
)
1118 rx_gen_op_mr(rx_cmp
, ctx
, a
->rd
, a
->rs
, a
->ld
, a
->mi
);
1123 static bool trans_SUB_ir(DisasContext
*ctx
, arg_SUB_ir
*a
)
1125 rx_gen_op_irr(rx_sub
, a
->rd
, a
->rd
, a
->imm
);
1130 /* sub dsp[rs], rd */
1131 static bool trans_SUB_mr(DisasContext
*ctx
, arg_SUB_mr
*a
)
1133 rx_gen_op_mr(rx_sub
, ctx
, a
->rd
, a
->rs
, a
->ld
, a
->mi
);
1137 /* sub rs2, rs, rd */
1138 static bool trans_SUB_rrr(DisasContext
*ctx
, arg_SUB_rrr
*a
)
1140 rx_gen_op_rrr(rx_sub
, a
->rd
, a
->rs2
, a
->rs
);
1145 static bool trans_SBB_rr(DisasContext
*ctx
, arg_SBB_rr
*a
)
1147 rx_gen_op_rrr(rx_sbb
, a
->rd
, a
->rd
, a
->rs
);
1151 /* sbb dsp[rs], rd */
1152 static bool trans_SBB_mr(DisasContext
*ctx
, arg_SBB_mr
*a
)
1158 rx_gen_op_mr(rx_sbb
, ctx
, a
->rd
, a
->rs
, a
->ld
, a
->mi
);
1162 static void rx_abs(TCGv ret
, TCGv arg1
)
1166 neg
= tcg_temp_new();
1167 zero
= tcg_const_i32(0);
1168 tcg_gen_neg_i32(neg
, arg1
);
1169 tcg_gen_movcond_i32(TCG_COND_LT
, ret
, arg1
, zero
, neg
, arg1
);
1171 tcg_temp_free(zero
);
1176 static bool trans_ABS_rr(DisasContext
*ctx
, arg_ABS_rr
*a
)
1178 rx_gen_op_rr(rx_abs
, a
->rd
, a
->rs
);
1183 static bool trans_MAX_ir(DisasContext
*ctx
, arg_MAX_ir
*a
)
1185 rx_gen_op_irr(tcg_gen_smax_i32
, a
->rd
, a
->rd
, a
->imm
);
1190 /* max dsp[rs], rd */
1191 static bool trans_MAX_mr(DisasContext
*ctx
, arg_MAX_mr
*a
)
1193 rx_gen_op_mr(tcg_gen_smax_i32
, ctx
, a
->rd
, a
->rs
, a
->ld
, a
->mi
);
1198 static bool trans_MIN_ir(DisasContext
*ctx
, arg_MIN_ir
*a
)
1200 rx_gen_op_irr(tcg_gen_smin_i32
, a
->rd
, a
->rd
, a
->imm
);
1205 /* min dsp[rs], rd */
1206 static bool trans_MIN_mr(DisasContext
*ctx
, arg_MIN_mr
*a
)
1208 rx_gen_op_mr(tcg_gen_smin_i32
, ctx
, a
->rd
, a
->rs
, a
->ld
, a
->mi
);
1212 /* mul #uimm4, rd */
1214 static bool trans_MUL_ir(DisasContext
*ctx
, arg_MUL_ir
*a
)
1216 rx_gen_op_irr(tcg_gen_mul_i32
, a
->rd
, a
->rd
, a
->imm
);
1221 /* mul dsp[rs], rd */
1222 static bool trans_MUL_mr(DisasContext
*ctx
, arg_MUL_mr
*a
)
1224 rx_gen_op_mr(tcg_gen_mul_i32
, ctx
, a
->rd
, a
->rs
, a
->ld
, a
->mi
);
1228 /* mul rs, rs2, rd */
1229 static bool trans_MUL_rrr(DisasContext
*ctx
, arg_MUL_rrr
*a
)
1231 rx_gen_op_rrr(tcg_gen_mul_i32
, a
->rd
, a
->rs
, a
->rs2
);
1236 static bool trans_EMUL_ir(DisasContext
*ctx
, arg_EMUL_ir
*a
)
1238 TCGv imm
= tcg_const_i32(a
->imm
);
1240 qemu_log_mask(LOG_GUEST_ERROR
, "rd too large %d", a
->rd
);
1242 tcg_gen_muls2_i32(cpu_regs
[a
->rd
], cpu_regs
[(a
->rd
+ 1) & 15],
1243 cpu_regs
[a
->rd
], imm
);
1249 /* emul dsp[rs], rd */
1250 static bool trans_EMUL_mr(DisasContext
*ctx
, arg_EMUL_mr
*a
)
1254 qemu_log_mask(LOG_GUEST_ERROR
, "rd too large %d", a
->rd
);
1256 mem
= tcg_temp_new();
1257 val
= rx_load_source(ctx
, mem
, a
->ld
, a
->mi
, a
->rs
);
1258 tcg_gen_muls2_i32(cpu_regs
[a
->rd
], cpu_regs
[(a
->rd
+ 1) & 15],
1259 cpu_regs
[a
->rd
], val
);
1264 /* emulu #imm, rd */
1265 static bool trans_EMULU_ir(DisasContext
*ctx
, arg_EMULU_ir
*a
)
1267 TCGv imm
= tcg_const_i32(a
->imm
);
1269 qemu_log_mask(LOG_GUEST_ERROR
, "rd too large %d", a
->rd
);
1271 tcg_gen_mulu2_i32(cpu_regs
[a
->rd
], cpu_regs
[(a
->rd
+ 1) & 15],
1272 cpu_regs
[a
->rd
], imm
);
1278 /* emulu dsp[rs], rd */
1279 static bool trans_EMULU_mr(DisasContext
*ctx
, arg_EMULU_mr
*a
)
1283 qemu_log_mask(LOG_GUEST_ERROR
, "rd too large %d", a
->rd
);
1285 mem
= tcg_temp_new();
1286 val
= rx_load_source(ctx
, mem
, a
->ld
, a
->mi
, a
->rs
);
1287 tcg_gen_mulu2_i32(cpu_regs
[a
->rd
], cpu_regs
[(a
->rd
+ 1) & 15],
1288 cpu_regs
[a
->rd
], val
);
1293 static void rx_div(TCGv ret
, TCGv arg1
, TCGv arg2
)
1295 gen_helper_div(ret
, cpu_env
, arg1
, arg2
);
1298 static void rx_divu(TCGv ret
, TCGv arg1
, TCGv arg2
)
1300 gen_helper_divu(ret
, cpu_env
, arg1
, arg2
);
1304 static bool trans_DIV_ir(DisasContext
*ctx
, arg_DIV_ir
*a
)
1306 rx_gen_op_irr(rx_div
, a
->rd
, a
->rd
, a
->imm
);
1311 /* div dsp[rs], rd */
1312 static bool trans_DIV_mr(DisasContext
*ctx
, arg_DIV_mr
*a
)
1314 rx_gen_op_mr(rx_div
, ctx
, a
->rd
, a
->rs
, a
->ld
, a
->mi
);
1319 static bool trans_DIVU_ir(DisasContext
*ctx
, arg_DIVU_ir
*a
)
1321 rx_gen_op_irr(rx_divu
, a
->rd
, a
->rd
, a
->imm
);
1326 /* divu dsp[rs], rd */
1327 static bool trans_DIVU_mr(DisasContext
*ctx
, arg_DIVU_mr
*a
)
1329 rx_gen_op_mr(rx_divu
, ctx
, a
->rd
, a
->rs
, a
->ld
, a
->mi
);
1334 /* shll #imm:5, rd */
1335 /* shll #imm:5, rs2, rd */
1336 static bool trans_SHLL_irr(DisasContext
*ctx
, arg_SHLL_irr
*a
)
1339 tmp
= tcg_temp_new();
1341 tcg_gen_sari_i32(cpu_psw_c
, cpu_regs
[a
->rs2
], 32 - a
->imm
);
1342 tcg_gen_shli_i32(cpu_regs
[a
->rd
], cpu_regs
[a
->rs2
], a
->imm
);
1343 tcg_gen_setcondi_i32(TCG_COND_EQ
, cpu_psw_o
, cpu_psw_c
, 0);
1344 tcg_gen_setcondi_i32(TCG_COND_EQ
, tmp
, cpu_psw_c
, 0xffffffff);
1345 tcg_gen_or_i32(cpu_psw_o
, cpu_psw_o
, tmp
);
1346 tcg_gen_setcondi_i32(TCG_COND_NE
, cpu_psw_c
, cpu_psw_c
, 0);
1348 tcg_gen_mov_i32(cpu_regs
[a
->rd
], cpu_regs
[a
->rs2
]);
1349 tcg_gen_movi_i32(cpu_psw_c
, 0);
1350 tcg_gen_movi_i32(cpu_psw_o
, 0);
1352 tcg_gen_mov_i32(cpu_psw_z
, cpu_regs
[a
->rd
]);
1353 tcg_gen_mov_i32(cpu_psw_s
, cpu_regs
[a
->rd
]);
1358 static bool trans_SHLL_rr(DisasContext
*ctx
, arg_SHLL_rr
*a
)
1360 TCGLabel
*noshift
, *done
;
1363 noshift
= gen_new_label();
1364 done
= gen_new_label();
1365 /* if (cpu_regs[a->rs]) { */
1366 tcg_gen_brcondi_i32(TCG_COND_EQ
, cpu_regs
[a
->rs
], 0, noshift
);
1367 count
= tcg_const_i32(32);
1368 tmp
= tcg_temp_new();
1369 tcg_gen_andi_i32(tmp
, cpu_regs
[a
->rs
], 31);
1370 tcg_gen_sub_i32(count
, count
, tmp
);
1371 tcg_gen_sar_i32(cpu_psw_c
, cpu_regs
[a
->rd
], count
);
1372 tcg_gen_shl_i32(cpu_regs
[a
->rd
], cpu_regs
[a
->rd
], tmp
);
1373 tcg_gen_setcondi_i32(TCG_COND_EQ
, cpu_psw_o
, cpu_psw_c
, 0);
1374 tcg_gen_setcondi_i32(TCG_COND_EQ
, tmp
, cpu_psw_c
, 0xffffffff);
1375 tcg_gen_or_i32(cpu_psw_o
, cpu_psw_o
, tmp
);
1376 tcg_gen_setcondi_i32(TCG_COND_NE
, cpu_psw_c
, cpu_psw_c
, 0);
1379 gen_set_label(noshift
);
1380 tcg_gen_movi_i32(cpu_psw_c
, 0);
1381 tcg_gen_movi_i32(cpu_psw_o
, 0);
1383 gen_set_label(done
);
1384 tcg_gen_mov_i32(cpu_psw_z
, cpu_regs
[a
->rd
]);
1385 tcg_gen_mov_i32(cpu_psw_s
, cpu_regs
[a
->rd
]);
1386 tcg_temp_free(count
);
1391 static inline void shiftr_imm(uint32_t rd
, uint32_t rs
, uint32_t imm
,
1394 static void (* const gen_sXri
[])(TCGv ret
, TCGv arg1
, int arg2
) = {
1395 tcg_gen_shri_i32
, tcg_gen_sari_i32
,
1397 tcg_debug_assert(alith
< 2);
1399 gen_sXri
[alith
](cpu_regs
[rd
], cpu_regs
[rs
], imm
- 1);
1400 tcg_gen_andi_i32(cpu_psw_c
, cpu_regs
[rd
], 0x00000001);
1401 gen_sXri
[alith
](cpu_regs
[rd
], cpu_regs
[rd
], 1);
1403 tcg_gen_mov_i32(cpu_regs
[rd
], cpu_regs
[rs
]);
1404 tcg_gen_movi_i32(cpu_psw_c
, 0);
1406 tcg_gen_movi_i32(cpu_psw_o
, 0);
1407 tcg_gen_mov_i32(cpu_psw_z
, cpu_regs
[rd
]);
1408 tcg_gen_mov_i32(cpu_psw_s
, cpu_regs
[rd
]);
1411 static inline void shiftr_reg(uint32_t rd
, uint32_t rs
, unsigned int alith
)
1413 TCGLabel
*noshift
, *done
;
1415 static void (* const gen_sXri
[])(TCGv ret
, TCGv arg1
, int arg2
) = {
1416 tcg_gen_shri_i32
, tcg_gen_sari_i32
,
1418 static void (* const gen_sXr
[])(TCGv ret
, TCGv arg1
, TCGv arg2
) = {
1419 tcg_gen_shr_i32
, tcg_gen_sar_i32
,
1421 tcg_debug_assert(alith
< 2);
1422 noshift
= gen_new_label();
1423 done
= gen_new_label();
1424 count
= tcg_temp_new();
1425 /* if (cpu_regs[rs]) { */
1426 tcg_gen_brcondi_i32(TCG_COND_EQ
, cpu_regs
[rs
], 0, noshift
);
1427 tcg_gen_andi_i32(count
, cpu_regs
[rs
], 31);
1428 tcg_gen_subi_i32(count
, count
, 1);
1429 gen_sXr
[alith
](cpu_regs
[rd
], cpu_regs
[rd
], count
);
1430 tcg_gen_andi_i32(cpu_psw_c
, cpu_regs
[rd
], 0x00000001);
1431 gen_sXri
[alith
](cpu_regs
[rd
], cpu_regs
[rd
], 1);
1434 gen_set_label(noshift
);
1435 tcg_gen_movi_i32(cpu_psw_c
, 0);
1437 gen_set_label(done
);
1438 tcg_gen_movi_i32(cpu_psw_o
, 0);
1439 tcg_gen_mov_i32(cpu_psw_z
, cpu_regs
[rd
]);
1440 tcg_gen_mov_i32(cpu_psw_s
, cpu_regs
[rd
]);
1441 tcg_temp_free(count
);
1444 /* shar #imm:5, rd */
1445 /* shar #imm:5, rs2, rd */
1446 static bool trans_SHAR_irr(DisasContext
*ctx
, arg_SHAR_irr
*a
)
1448 shiftr_imm(a
->rd
, a
->rs2
, a
->imm
, 1);
1453 static bool trans_SHAR_rr(DisasContext
*ctx
, arg_SHAR_rr
*a
)
1455 shiftr_reg(a
->rd
, a
->rs
, 1);
1459 /* shlr #imm:5, rd */
1460 /* shlr #imm:5, rs2, rd */
1461 static bool trans_SHLR_irr(DisasContext
*ctx
, arg_SHLR_irr
*a
)
1463 shiftr_imm(a
->rd
, a
->rs2
, a
->imm
, 0);
1468 static bool trans_SHLR_rr(DisasContext
*ctx
, arg_SHLR_rr
*a
)
1470 shiftr_reg(a
->rd
, a
->rs
, 0);
1475 static bool trans_ROLC(DisasContext
*ctx
, arg_ROLC
*a
)
1478 tmp
= tcg_temp_new();
1479 tcg_gen_shri_i32(tmp
, cpu_regs
[a
->rd
], 31);
1480 tcg_gen_shli_i32(cpu_regs
[a
->rd
], cpu_regs
[a
->rd
], 1);
1481 tcg_gen_or_i32(cpu_regs
[a
->rd
], cpu_regs
[a
->rd
], cpu_psw_c
);
1482 tcg_gen_mov_i32(cpu_psw_c
, tmp
);
1483 tcg_gen_mov_i32(cpu_psw_z
, cpu_regs
[a
->rd
]);
1484 tcg_gen_mov_i32(cpu_psw_s
, cpu_regs
[a
->rd
]);
1490 static bool trans_RORC(DisasContext
*ctx
, arg_RORC
*a
)
1493 tmp
= tcg_temp_new();
1494 tcg_gen_andi_i32(tmp
, cpu_regs
[a
->rd
], 0x00000001);
1495 tcg_gen_shri_i32(cpu_regs
[a
->rd
], cpu_regs
[a
->rd
], 1);
1496 tcg_gen_shli_i32(cpu_psw_c
, cpu_psw_c
, 31);
1497 tcg_gen_or_i32(cpu_regs
[a
->rd
], cpu_regs
[a
->rd
], cpu_psw_c
);
1498 tcg_gen_mov_i32(cpu_psw_c
, tmp
);
1499 tcg_gen_mov_i32(cpu_psw_z
, cpu_regs
[a
->rd
]);
1500 tcg_gen_mov_i32(cpu_psw_s
, cpu_regs
[a
->rd
]);
1504 enum {ROTR
= 0, ROTL
= 1};
1505 enum {ROT_IMM
= 0, ROT_REG
= 1};
1506 static inline void rx_rot(int ir
, int dir
, int rd
, int src
)
1510 if (ir
== ROT_IMM
) {
1511 tcg_gen_rotli_i32(cpu_regs
[rd
], cpu_regs
[rd
], src
);
1513 tcg_gen_rotl_i32(cpu_regs
[rd
], cpu_regs
[rd
], cpu_regs
[src
]);
1515 tcg_gen_andi_i32(cpu_psw_c
, cpu_regs
[rd
], 0x00000001);
1518 if (ir
== ROT_IMM
) {
1519 tcg_gen_rotri_i32(cpu_regs
[rd
], cpu_regs
[rd
], src
);
1521 tcg_gen_rotr_i32(cpu_regs
[rd
], cpu_regs
[rd
], cpu_regs
[src
]);
1523 tcg_gen_shri_i32(cpu_psw_c
, cpu_regs
[rd
], 31);
1526 tcg_gen_mov_i32(cpu_psw_z
, cpu_regs
[rd
]);
1527 tcg_gen_mov_i32(cpu_psw_s
, cpu_regs
[rd
]);
1531 static bool trans_ROTL_ir(DisasContext
*ctx
, arg_ROTL_ir
*a
)
1533 rx_rot(ROT_IMM
, ROTL
, a
->rd
, a
->imm
);
1538 static bool trans_ROTL_rr(DisasContext
*ctx
, arg_ROTL_rr
*a
)
1540 rx_rot(ROT_REG
, ROTL
, a
->rd
, a
->rs
);
1545 static bool trans_ROTR_ir(DisasContext
*ctx
, arg_ROTR_ir
*a
)
1547 rx_rot(ROT_IMM
, ROTR
, a
->rd
, a
->imm
);
1552 static bool trans_ROTR_rr(DisasContext
*ctx
, arg_ROTR_rr
*a
)
1554 rx_rot(ROT_REG
, ROTR
, a
->rd
, a
->rs
);
1559 static bool trans_REVL(DisasContext
*ctx
, arg_REVL
*a
)
1561 tcg_gen_bswap32_i32(cpu_regs
[a
->rd
], cpu_regs
[a
->rs
]);
1566 static bool trans_REVW(DisasContext
*ctx
, arg_REVW
*a
)
1569 tmp
= tcg_temp_new();
1570 tcg_gen_andi_i32(tmp
, cpu_regs
[a
->rs
], 0x00ff00ff);
1571 tcg_gen_shli_i32(tmp
, tmp
, 8);
1572 tcg_gen_shri_i32(cpu_regs
[a
->rd
], cpu_regs
[a
->rs
], 8);
1573 tcg_gen_andi_i32(cpu_regs
[a
->rd
], cpu_regs
[a
->rd
], 0x00ff00ff);
1574 tcg_gen_or_i32(cpu_regs
[a
->rd
], cpu_regs
[a
->rd
], tmp
);
1579 /* conditional branch helper */
1580 static void rx_bcnd_main(DisasContext
*ctx
, int cd
, int dst
)
1587 dc
.temp
= tcg_temp_new();
1589 t
= gen_new_label();
1590 done
= gen_new_label();
1591 tcg_gen_brcondi_i32(dc
.cond
, dc
.value
, 0, t
);
1592 gen_goto_tb(ctx
, 0, ctx
->base
.pc_next
);
1595 gen_goto_tb(ctx
, 1, ctx
->pc
+ dst
);
1596 gen_set_label(done
);
1597 tcg_temp_free(dc
.temp
);
1600 /* always true case */
1601 gen_goto_tb(ctx
, 0, ctx
->pc
+ dst
);
1604 /* always false case */
1610 /* beq dsp:3 / bne dsp:3 */
1611 /* beq dsp:8 / bne dsp:8 */
1612 /* bc dsp:8 / bnc dsp:8 */
1613 /* bgtu dsp:8 / bleu dsp:8 */
1614 /* bpz dsp:8 / bn dsp:8 */
1615 /* bge dsp:8 / blt dsp:8 */
1616 /* bgt dsp:8 / ble dsp:8 */
1617 /* bo dsp:8 / bno dsp:8 */
1618 /* beq dsp:16 / bne dsp:16 */
1619 static bool trans_BCnd(DisasContext
*ctx
, arg_BCnd
*a
)
1621 rx_bcnd_main(ctx
, a
->cd
, a
->dsp
);
1629 static bool trans_BRA(DisasContext
*ctx
, arg_BRA
*a
)
1631 rx_bcnd_main(ctx
, 14, a
->dsp
);
1636 static bool trans_BRA_l(DisasContext
*ctx
, arg_BRA_l
*a
)
1638 tcg_gen_addi_i32(cpu_pc
, cpu_regs
[a
->rd
], ctx
->pc
);
1639 ctx
->base
.is_jmp
= DISAS_JUMP
;
1643 static inline void rx_save_pc(DisasContext
*ctx
)
1645 TCGv pc
= tcg_const_i32(ctx
->base
.pc_next
);
1651 static bool trans_JMP(DisasContext
*ctx
, arg_JMP
*a
)
1653 tcg_gen_mov_i32(cpu_pc
, cpu_regs
[a
->rs
]);
1654 ctx
->base
.is_jmp
= DISAS_JUMP
;
1659 static bool trans_JSR(DisasContext
*ctx
, arg_JSR
*a
)
1662 tcg_gen_mov_i32(cpu_pc
, cpu_regs
[a
->rs
]);
1663 ctx
->base
.is_jmp
= DISAS_JUMP
;
1669 static bool trans_BSR(DisasContext
*ctx
, arg_BSR
*a
)
1672 rx_bcnd_main(ctx
, 14, a
->dsp
);
1677 static bool trans_BSR_l(DisasContext
*ctx
, arg_BSR_l
*a
)
1680 tcg_gen_addi_i32(cpu_pc
, cpu_regs
[a
->rd
], ctx
->pc
);
1681 ctx
->base
.is_jmp
= DISAS_JUMP
;
1686 static bool trans_RTS(DisasContext
*ctx
, arg_RTS
*a
)
1689 ctx
->base
.is_jmp
= DISAS_JUMP
;
1694 static bool trans_NOP(DisasContext
*ctx
, arg_NOP
*a
)
1700 static bool trans_SCMPU(DisasContext
*ctx
, arg_SCMPU
*a
)
1702 gen_helper_scmpu(cpu_env
);
1707 static bool trans_SMOVU(DisasContext
*ctx
, arg_SMOVU
*a
)
1709 gen_helper_smovu(cpu_env
);
1714 static bool trans_SMOVF(DisasContext
*ctx
, arg_SMOVF
*a
)
1716 gen_helper_smovf(cpu_env
);
1721 static bool trans_SMOVB(DisasContext
*ctx
, arg_SMOVB
*a
)
1723 gen_helper_smovb(cpu_env
);
1727 #define STRING(op) \
1729 TCGv size = tcg_const_i32(a->sz); \
1730 gen_helper_##op(cpu_env, size); \
1731 tcg_temp_free(size); \
1735 static bool trans_SUNTIL(DisasContext
*ctx
, arg_SUNTIL
*a
)
1742 static bool trans_SWHILE(DisasContext
*ctx
, arg_SWHILE
*a
)
1748 static bool trans_SSTR(DisasContext
*ctx
, arg_SSTR
*a
)
1755 static bool trans_RMPA(DisasContext
*ctx
, arg_RMPA
*a
)
1761 static void rx_mul64hi(TCGv_i64 ret
, int rs
, int rs2
)
1763 TCGv_i64 tmp0
, tmp1
;
1764 tmp0
= tcg_temp_new_i64();
1765 tmp1
= tcg_temp_new_i64();
1766 tcg_gen_ext_i32_i64(tmp0
, cpu_regs
[rs
]);
1767 tcg_gen_sari_i64(tmp0
, tmp0
, 16);
1768 tcg_gen_ext_i32_i64(tmp1
, cpu_regs
[rs2
]);
1769 tcg_gen_sari_i64(tmp1
, tmp1
, 16);
1770 tcg_gen_mul_i64(ret
, tmp0
, tmp1
);
1771 tcg_gen_shli_i64(ret
, ret
, 16);
1772 tcg_temp_free_i64(tmp0
);
1773 tcg_temp_free_i64(tmp1
);
1776 static void rx_mul64lo(TCGv_i64 ret
, int rs
, int rs2
)
1778 TCGv_i64 tmp0
, tmp1
;
1779 tmp0
= tcg_temp_new_i64();
1780 tmp1
= tcg_temp_new_i64();
1781 tcg_gen_ext_i32_i64(tmp0
, cpu_regs
[rs
]);
1782 tcg_gen_ext16s_i64(tmp0
, tmp0
);
1783 tcg_gen_ext_i32_i64(tmp1
, cpu_regs
[rs2
]);
1784 tcg_gen_ext16s_i64(tmp1
, tmp1
);
1785 tcg_gen_mul_i64(ret
, tmp0
, tmp1
);
1786 tcg_gen_shli_i64(ret
, ret
, 16);
1787 tcg_temp_free_i64(tmp0
);
1788 tcg_temp_free_i64(tmp1
);
1792 static bool trans_MULHI(DisasContext
*ctx
, arg_MULHI
*a
)
1794 rx_mul64hi(cpu_acc
, a
->rs
, a
->rs2
);
1799 static bool trans_MULLO(DisasContext
*ctx
, arg_MULLO
*a
)
1801 rx_mul64lo(cpu_acc
, a
->rs
, a
->rs2
);
1806 static bool trans_MACHI(DisasContext
*ctx
, arg_MACHI
*a
)
1809 tmp
= tcg_temp_new_i64();
1810 rx_mul64hi(tmp
, a
->rs
, a
->rs2
);
1811 tcg_gen_add_i64(cpu_acc
, cpu_acc
, tmp
);
1812 tcg_temp_free_i64(tmp
);
1817 static bool trans_MACLO(DisasContext
*ctx
, arg_MACLO
*a
)
1820 tmp
= tcg_temp_new_i64();
1821 rx_mul64lo(tmp
, a
->rs
, a
->rs2
);
1822 tcg_gen_add_i64(cpu_acc
, cpu_acc
, tmp
);
1823 tcg_temp_free_i64(tmp
);
1828 static bool trans_MVFACHI(DisasContext
*ctx
, arg_MVFACHI
*a
)
1830 tcg_gen_extrh_i64_i32(cpu_regs
[a
->rd
], cpu_acc
);
1835 static bool trans_MVFACMI(DisasContext
*ctx
, arg_MVFACMI
*a
)
1838 rd64
= tcg_temp_new_i64();
1839 tcg_gen_extract_i64(rd64
, cpu_acc
, 16, 32);
1840 tcg_gen_extrl_i64_i32(cpu_regs
[a
->rd
], rd64
);
1841 tcg_temp_free_i64(rd64
);
1846 static bool trans_MVTACHI(DisasContext
*ctx
, arg_MVTACHI
*a
)
1849 rs64
= tcg_temp_new_i64();
1850 tcg_gen_extu_i32_i64(rs64
, cpu_regs
[a
->rs
]);
1851 tcg_gen_deposit_i64(cpu_acc
, cpu_acc
, rs64
, 32, 32);
1852 tcg_temp_free_i64(rs64
);
1857 static bool trans_MVTACLO(DisasContext
*ctx
, arg_MVTACLO
*a
)
1860 rs64
= tcg_temp_new_i64();
1861 tcg_gen_extu_i32_i64(rs64
, cpu_regs
[a
->rs
]);
1862 tcg_gen_deposit_i64(cpu_acc
, cpu_acc
, rs64
, 0, 32);
1863 tcg_temp_free_i64(rs64
);
1868 static bool trans_RACW(DisasContext
*ctx
, arg_RACW
*a
)
1870 TCGv imm
= tcg_const_i32(a
->imm
+ 1);
1871 gen_helper_racw(cpu_env
, imm
);
1877 static bool trans_SAT(DisasContext
*ctx
, arg_SAT
*a
)
1880 tmp
= tcg_temp_new();
1881 z
= tcg_const_i32(0);
1882 /* S == 1 -> 0xffffffff / S == 0 -> 0x00000000 */
1883 tcg_gen_sari_i32(tmp
, cpu_psw_s
, 31);
1884 /* S == 1 -> 0x7fffffff / S == 0 -> 0x80000000 */
1885 tcg_gen_xori_i32(tmp
, tmp
, 0x80000000);
1886 tcg_gen_movcond_i32(TCG_COND_LT
, cpu_regs
[a
->rd
],
1887 cpu_psw_o
, z
, tmp
, cpu_regs
[a
->rd
]);
1894 static bool trans_SATR(DisasContext
*ctx
, arg_SATR
*a
)
1896 gen_helper_satr(cpu_env
);
1900 #define cat3(a, b, c) a##b##c
1901 #define FOP(name, op) \
1902 static bool cat3(trans_, name, _ir)(DisasContext *ctx, \
1903 cat3(arg_, name, _ir) * a) \
1905 TCGv imm = tcg_const_i32(li(ctx, 0)); \
1906 gen_helper_##op(cpu_regs[a->rd], cpu_env, \
1907 cpu_regs[a->rd], imm); \
1908 tcg_temp_free(imm); \
1911 static bool cat3(trans_, name, _mr)(DisasContext *ctx, \
1912 cat3(arg_, name, _mr) * a) \
1915 mem = tcg_temp_new(); \
1916 val = rx_load_source(ctx, mem, a->ld, MO_32, a->rs); \
1917 gen_helper_##op(cpu_regs[a->rd], cpu_env, \
1918 cpu_regs[a->rd], val); \
1919 tcg_temp_free(mem); \
1923 #define FCONVOP(name, op) \
1924 static bool trans_##name(DisasContext *ctx, arg_##name * a) \
1927 mem = tcg_temp_new(); \
1928 val = rx_load_source(ctx, mem, a->ld, MO_32, a->rs); \
1929 gen_helper_##op(cpu_regs[a->rd], cpu_env, val); \
1930 tcg_temp_free(mem); \
1940 static bool trans_FCMP_ir(DisasContext
*ctx
, arg_FCMP_ir
* a
)
1942 TCGv imm
= tcg_const_i32(li(ctx
, 0));
1943 gen_helper_fcmp(cpu_env
, cpu_regs
[a
->rd
], imm
);
1948 /* fcmp dsp[rs], rd */
1950 static bool trans_FCMP_mr(DisasContext
*ctx
, arg_FCMP_mr
*a
)
1953 mem
= tcg_temp_new();
1954 val
= rx_load_source(ctx
, mem
, a
->ld
, MO_32
, a
->rs
);
1955 gen_helper_fcmp(cpu_env
, cpu_regs
[a
->rd
], val
);
1961 FCONVOP(ROUND
, round
)
1964 /* itof dsp[rs], rd */
1965 static bool trans_ITOF(DisasContext
*ctx
, arg_ITOF
* a
)
1968 mem
= tcg_temp_new();
1969 val
= rx_load_source(ctx
, mem
, a
->ld
, a
->mi
, a
->rs
);
1970 gen_helper_itof(cpu_regs
[a
->rd
], cpu_env
, val
);
1975 static void rx_bsetm(TCGv mem
, TCGv mask
)
1978 val
= tcg_temp_new();
1979 rx_gen_ld(MO_8
, val
, mem
);
1980 tcg_gen_or_i32(val
, val
, mask
);
1981 rx_gen_st(MO_8
, val
, mem
);
1985 static void rx_bclrm(TCGv mem
, TCGv mask
)
1988 val
= tcg_temp_new();
1989 rx_gen_ld(MO_8
, val
, mem
);
1990 tcg_gen_andc_i32(val
, val
, mask
);
1991 rx_gen_st(MO_8
, val
, mem
);
1995 static void rx_btstm(TCGv mem
, TCGv mask
)
1998 val
= tcg_temp_new();
1999 rx_gen_ld(MO_8
, val
, mem
);
2000 tcg_gen_and_i32(val
, val
, mask
);
2001 tcg_gen_setcondi_i32(TCG_COND_NE
, cpu_psw_c
, val
, 0);
2002 tcg_gen_mov_i32(cpu_psw_z
, cpu_psw_c
);
2006 static void rx_bnotm(TCGv mem
, TCGv mask
)
2009 val
= tcg_temp_new();
2010 rx_gen_ld(MO_8
, val
, mem
);
2011 tcg_gen_xor_i32(val
, val
, mask
);
2012 rx_gen_st(MO_8
, val
, mem
);
2016 static void rx_bsetr(TCGv reg
, TCGv mask
)
2018 tcg_gen_or_i32(reg
, reg
, mask
);
2021 static void rx_bclrr(TCGv reg
, TCGv mask
)
2023 tcg_gen_andc_i32(reg
, reg
, mask
);
2026 static inline void rx_btstr(TCGv reg
, TCGv mask
)
2029 t0
= tcg_temp_new();
2030 tcg_gen_and_i32(t0
, reg
, mask
);
2031 tcg_gen_setcondi_i32(TCG_COND_NE
, cpu_psw_c
, t0
, 0);
2032 tcg_gen_mov_i32(cpu_psw_z
, cpu_psw_c
);
2036 static inline void rx_bnotr(TCGv reg
, TCGv mask
)
2038 tcg_gen_xor_i32(reg
, reg
, mask
);
/*
 * Generate the four addressing forms of a bit instruction:
 *   _im: #imm bit of a memory byte      _ir: #imm bit of a register
 *   _rr: Rs-selected bit of a register  _rm: Rd-selected bit of a memory byte
 * Register bit numbers are taken mod 32; memory (byte) bit numbers mod 8.
 */
#define BITOP(name, op)                                                 \
    static bool cat3(trans_, name, _im)(DisasContext *ctx,              \
                                        cat3(arg_, name, _im) * a)      \
    {                                                                   \
        TCGv mask, mem, addr;                                           \
        mem = tcg_temp_new();                                           \
        mask = tcg_const_i32(1 << a->imm);                              \
        addr = rx_index_addr(ctx, mem, a->ld, MO_8, a->rs);             \
        cat3(rx_, op, m)(addr, mask);                                   \
        tcg_temp_free(mask);                                            \
        tcg_temp_free(mem);                                             \
        return true;                                                    \
    }                                                                   \
    static bool cat3(trans_, name, _ir)(DisasContext *ctx,              \
                                        cat3(arg_, name, _ir) * a)      \
    {                                                                   \
        TCGv mask;                                                      \
        mask = tcg_const_i32(1 << a->imm);                              \
        cat3(rx_, op, r)(cpu_regs[a->rd], mask);                        \
        tcg_temp_free(mask);                                            \
        return true;                                                    \
    }                                                                   \
    static bool cat3(trans_, name, _rr)(DisasContext *ctx,              \
                                        cat3(arg_, name, _rr) * a)      \
    {                                                                   \
        TCGv mask, b;                                                   \
        mask = tcg_const_i32(1);                                        \
        b = tcg_temp_new();                                             \
        tcg_gen_andi_i32(b, cpu_regs[a->rs], 31);                       \
        tcg_gen_shl_i32(mask, mask, b);                                 \
        cat3(rx_, op, r)(cpu_regs[a->rd], mask);                        \
        tcg_temp_free(mask);                                            \
        tcg_temp_free(b);                                               \
        return true;                                                    \
    }                                                                   \
    static bool cat3(trans_, name, _rm)(DisasContext *ctx,              \
                                        cat3(arg_, name, _rm) * a)      \
    {                                                                   \
        TCGv mask, mem, addr, b;                                        \
        mask = tcg_const_i32(1);                                        \
        b = tcg_temp_new();                                             \
        tcg_gen_andi_i32(b, cpu_regs[a->rd], 7);                        \
        tcg_gen_shl_i32(mask, mask, b);                                 \
        mem = tcg_temp_new();                                           \
        addr = rx_index_addr(ctx, mem, a->ld, MO_8, a->rs);             \
        cat3(rx_, op, m)(addr, mask);                                   \
        tcg_temp_free(mem);                                             \
        tcg_temp_free(mask);                                            \
        tcg_temp_free(b);                                               \
        return true;                                                    \
    }
2098 static inline void bmcnd_op(TCGv val
, TCGCond cond
, int pos
)
2102 dc
.temp
= tcg_temp_new();
2103 bit
= tcg_temp_new();
2104 psw_cond(&dc
, cond
);
2105 tcg_gen_andi_i32(val
, val
, ~(1 << pos
));
2106 tcg_gen_setcondi_i32(dc
.cond
, bit
, dc
.value
, 0);
2107 tcg_gen_deposit_i32(val
, val
, bit
, pos
, 1);
2109 tcg_temp_free(dc
.temp
);
2112 /* bmcnd #imm, dsp[rd] */
2113 static bool trans_BMCnd_im(DisasContext
*ctx
, arg_BMCnd_im
*a
)
2115 TCGv val
, mem
, addr
;
2116 val
= tcg_temp_new();
2117 mem
= tcg_temp_new();
2118 addr
= rx_index_addr(ctx
, mem
, a
->ld
, MO_8
, a
->rd
);
2119 rx_gen_ld(MO_8
, val
, addr
);
2120 bmcnd_op(val
, a
->cd
, a
->imm
);
2121 rx_gen_st(MO_8
, val
, addr
);
2127 /* bmcond #imm, rd */
2128 static bool trans_BMCnd_ir(DisasContext
*ctx
, arg_BMCnd_ir
*a
)
2130 bmcnd_op(cpu_regs
[a
->rd
], a
->cd
, a
->imm
);
2143 static inline void clrsetpsw(DisasContext
*ctx
, int cb
, int val
)
2148 tcg_gen_movi_i32(cpu_psw_c
, val
);
2151 tcg_gen_movi_i32(cpu_psw_z
, val
== 0);
2154 tcg_gen_movi_i32(cpu_psw_s
, val
? -1 : 0);
2157 tcg_gen_movi_i32(cpu_psw_o
, val
<< 31);
2160 qemu_log_mask(LOG_GUEST_ERROR
, "Invalid distination %d", cb
);
2163 } else if (is_privileged(ctx
, 0)) {
2166 tcg_gen_movi_i32(cpu_psw_i
, val
);
2167 ctx
->base
.is_jmp
= DISAS_UPDATE
;
2170 tcg_gen_movi_i32(cpu_psw_u
, val
);
2173 qemu_log_mask(LOG_GUEST_ERROR
, "Invalid distination %d", cb
);
2180 static bool trans_CLRPSW(DisasContext
*ctx
, arg_CLRPSW
*a
)
2182 clrsetpsw(ctx
, a
->cb
, 0);
2187 static bool trans_SETPSW(DisasContext
*ctx
, arg_SETPSW
*a
)
2189 clrsetpsw(ctx
, a
->cb
, 1);
2194 static bool trans_MVTIPL(DisasContext
*ctx
, arg_MVTIPL
*a
)
2196 if (is_privileged(ctx
, 1)) {
2197 tcg_gen_movi_i32(cpu_psw_ipl
, a
->imm
);
2198 ctx
->base
.is_jmp
= DISAS_UPDATE
;
2204 static bool trans_MVTC_i(DisasContext
*ctx
, arg_MVTC_i
*a
)
2208 imm
= tcg_const_i32(a
->imm
);
2209 move_to_cr(ctx
, imm
, a
->cr
);
2210 if (a
->cr
== 0 && is_privileged(ctx
, 0)) {
2211 ctx
->base
.is_jmp
= DISAS_UPDATE
;
2218 static bool trans_MVTC_r(DisasContext
*ctx
, arg_MVTC_r
*a
)
2220 move_to_cr(ctx
, cpu_regs
[a
->rs
], a
->cr
);
2221 if (a
->cr
== 0 && is_privileged(ctx
, 0)) {
2222 ctx
->base
.is_jmp
= DISAS_UPDATE
;
2228 static bool trans_MVFC(DisasContext
*ctx
, arg_MVFC
*a
)
2230 move_from_cr(cpu_regs
[a
->rd
], a
->cr
, ctx
->pc
);
2235 static bool trans_RTFI(DisasContext
*ctx
, arg_RTFI
*a
)
2238 if (is_privileged(ctx
, 1)) {
2239 psw
= tcg_temp_new();
2240 tcg_gen_mov_i32(cpu_pc
, cpu_bpc
);
2241 tcg_gen_mov_i32(psw
, cpu_bpsw
);
2242 gen_helper_set_psw_rte(cpu_env
, psw
);
2243 ctx
->base
.is_jmp
= DISAS_EXIT
;
2250 static bool trans_RTE(DisasContext
*ctx
, arg_RTE
*a
)
2253 if (is_privileged(ctx
, 1)) {
2254 psw
= tcg_temp_new();
2257 gen_helper_set_psw_rte(cpu_env
, psw
);
2258 ctx
->base
.is_jmp
= DISAS_EXIT
;
2265 static bool trans_BRK(DisasContext
*ctx
, arg_BRK
*a
)
2267 tcg_gen_movi_i32(cpu_pc
, ctx
->base
.pc_next
);
2268 gen_helper_rxbrk(cpu_env
);
2269 ctx
->base
.is_jmp
= DISAS_NORETURN
;
2274 static bool trans_INT(DisasContext
*ctx
, arg_INT
*a
)
2278 tcg_debug_assert(a
->imm
< 0x100);
2279 vec
= tcg_const_i32(a
->imm
);
2280 tcg_gen_movi_i32(cpu_pc
, ctx
->base
.pc_next
);
2281 gen_helper_rxint(cpu_env
, vec
);
2283 ctx
->base
.is_jmp
= DISAS_NORETURN
;
2288 static bool trans_WAIT(DisasContext
*ctx
, arg_WAIT
*a
)
2290 if (is_privileged(ctx
, 1)) {
2291 tcg_gen_addi_i32(cpu_pc
, cpu_pc
, 2);
2292 gen_helper_wait(cpu_env
);
2297 static void rx_tr_init_disas_context(DisasContextBase
*dcbase
, CPUState
*cs
)
2299 CPURXState
*env
= cs
->env_ptr
;
2300 DisasContext
*ctx
= container_of(dcbase
, DisasContext
, base
);
2304 static void rx_tr_tb_start(DisasContextBase
*dcbase
, CPUState
*cs
)
2308 static void rx_tr_insn_start(DisasContextBase
*dcbase
, CPUState
*cs
)
2310 DisasContext
*ctx
= container_of(dcbase
, DisasContext
, base
);
2312 tcg_gen_insn_start(ctx
->base
.pc_next
);
2315 static bool rx_tr_breakpoint_check(DisasContextBase
*dcbase
, CPUState
*cs
,
2316 const CPUBreakpoint
*bp
)
2318 DisasContext
*ctx
= container_of(dcbase
, DisasContext
, base
);
2320 /* We have hit a breakpoint - make sure PC is up-to-date */
2321 tcg_gen_movi_i32(cpu_pc
, ctx
->base
.pc_next
);
2322 gen_helper_debug(cpu_env
);
2323 ctx
->base
.is_jmp
= DISAS_NORETURN
;
2324 ctx
->base
.pc_next
+= 1;
2328 static void rx_tr_translate_insn(DisasContextBase
*dcbase
, CPUState
*cs
)
2330 DisasContext
*ctx
= container_of(dcbase
, DisasContext
, base
);
2333 ctx
->pc
= ctx
->base
.pc_next
;
2334 insn
= decode_load(ctx
);
2335 if (!decode(ctx
, insn
)) {
2336 gen_helper_raise_illegal_instruction(cpu_env
);
2340 static void rx_tr_tb_stop(DisasContextBase
*dcbase
, CPUState
*cs
)
2342 DisasContext
*ctx
= container_of(dcbase
, DisasContext
, base
);
2344 switch (ctx
->base
.is_jmp
) {
2346 case DISAS_TOO_MANY
:
2347 gen_goto_tb(ctx
, 0, dcbase
->pc_next
);
2350 if (ctx
->base
.singlestep_enabled
) {
2351 gen_helper_debug(cpu_env
);
2353 tcg_gen_lookup_and_goto_ptr();
2357 tcg_gen_movi_i32(cpu_pc
, ctx
->base
.pc_next
);
2359 tcg_gen_exit_tb(NULL
, 0);
2361 case DISAS_NORETURN
:
2364 g_assert_not_reached();
2368 static void rx_tr_disas_log(const DisasContextBase
*dcbase
, CPUState
*cs
)
2370 qemu_log("IN:\n"); /* , lookup_symbol(dcbase->pc_first)); */
2371 log_target_disas(cs
, dcbase
->pc_first
, dcbase
->tb
->size
);
2374 static const TranslatorOps rx_tr_ops
= {
2375 .init_disas_context
= rx_tr_init_disas_context
,
2376 .tb_start
= rx_tr_tb_start
,
2377 .insn_start
= rx_tr_insn_start
,
2378 .breakpoint_check
= rx_tr_breakpoint_check
,
2379 .translate_insn
= rx_tr_translate_insn
,
2380 .tb_stop
= rx_tr_tb_stop
,
2381 .disas_log
= rx_tr_disas_log
,
2384 void gen_intermediate_code(CPUState
*cs
, TranslationBlock
*tb
, int max_insns
)
2388 translator_loop(&rx_tr_ops
, &dc
.base
, cs
, tb
, max_insns
);
2391 void restore_state_to_opc(CPURXState
*env
, TranslationBlock
*tb
,
2397 #define ALLOC_REGISTER(sym, name) \
2398 cpu_##sym = tcg_global_mem_new_i32(cpu_env, \
2399 offsetof(CPURXState, sym), name)
2401 void rx_translate_init(void)
2403 static const char * const regnames
[NUM_REGS
] = {
2404 "R0", "R1", "R2", "R3", "R4", "R5", "R6", "R7",
2405 "R8", "R9", "R10", "R11", "R12", "R13", "R14", "R15"
2409 for (i
= 0; i
< NUM_REGS
; i
++) {
2410 cpu_regs
[i
] = tcg_global_mem_new_i32(cpu_env
,
2411 offsetof(CPURXState
, regs
[i
]),
2414 ALLOC_REGISTER(pc
, "PC");
2415 ALLOC_REGISTER(psw_o
, "PSW(O)");
2416 ALLOC_REGISTER(psw_s
, "PSW(S)");
2417 ALLOC_REGISTER(psw_z
, "PSW(Z)");
2418 ALLOC_REGISTER(psw_c
, "PSW(C)");
2419 ALLOC_REGISTER(psw_u
, "PSW(U)");
2420 ALLOC_REGISTER(psw_i
, "PSW(I)");
2421 ALLOC_REGISTER(psw_pm
, "PSW(PM)");
2422 ALLOC_REGISTER(psw_ipl
, "PSW(IPL)");
2423 ALLOC_REGISTER(usp
, "USP");
2424 ALLOC_REGISTER(fpsw
, "FPSW");
2425 ALLOC_REGISTER(bpsw
, "BPSW");
2426 ALLOC_REGISTER(bpc
, "BPC");
2427 ALLOC_REGISTER(isp
, "ISP");
2428 ALLOC_REGISTER(fintv
, "FINTV");
2429 ALLOC_REGISTER(intb
, "INTB");
2430 cpu_acc
= tcg_global_mem_new_i64(cpu_env
,
2431 offsetof(CPURXState
, acc
), "ACC");