4 Copyright (C) 2003 Thomas M. Ogrisegg <tom@fnord.at>
5 Copyright (C) 2003-2005 Fabrice Bellard
7 This library is free software; you can redistribute it and/or
8 modify it under the terms of the GNU Lesser General Public
9 License as published by the Free Software Foundation; either
10 version 2.1 of the License, or (at your option) any later version.
12 This library is distributed in the hope that it will be useful,
13 but WITHOUT ANY WARRANTY; without even the implied warranty of
14 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
15 Lesser General Public License for more details.
17 You should have received a copy of the GNU Lesser General Public
18 License along with this library; if not, see <http://www.gnu.org/licenses/>.
21 #include "qemu/osdep.h"
24 #include "disas/disas.h"
25 #include "exec/helper-proto.h"
26 #include "exec/exec-all.h"
27 #include "tcg/tcg-op.h"
29 #include "exec/helper-gen.h"
31 #include "exec/translator.h"
35 #define HELPER_H "helper.h"
36 #include "exec/helper-info.c.inc"
40 # define gen_helper_rdpsr(D, E) qemu_build_not_reached()
41 # define gen_helper_power_down(E) qemu_build_not_reached()
42 # define gen_helper_wrpsr(E, S) qemu_build_not_reached()
44 # define gen_helper_clear_softint(E, S) qemu_build_not_reached()
45 # define gen_helper_flushw(E) qemu_build_not_reached()
46 # define gen_helper_rdccr(D, E) qemu_build_not_reached()
47 # define gen_helper_rdcwp(D, E) qemu_build_not_reached()
48 # define gen_helper_restored(E) qemu_build_not_reached()
49 # define gen_helper_saved(E) qemu_build_not_reached()
50 # define gen_helper_set_softint(E, S) qemu_build_not_reached()
51 # define gen_helper_tick_get_count(D, E, T, C) qemu_build_not_reached()
52 # define gen_helper_tick_set_count(P, S) qemu_build_not_reached()
53 # define gen_helper_tick_set_limit(P, S) qemu_build_not_reached()
54 # define gen_helper_wrccr(E, S) qemu_build_not_reached()
55 # define gen_helper_wrcwp(E, S) qemu_build_not_reached()
56 # define gen_helper_wrgl(E, S) qemu_build_not_reached()
57 # define gen_helper_write_softint(E, S) qemu_build_not_reached()
58 # define gen_helper_wrpil(E, S) qemu_build_not_reached()
59 # define gen_helper_wrpstate(E, S) qemu_build_not_reached()
63 /* Dynamic PC, must exit to main loop. */
65 /* Dynamic PC, one of two values according to jump_pc[T2]. */
67 /* Dynamic PC, may lookup next TB. */
68 #define DYNAMIC_PC_LOOKUP 3
70 #define DISAS_EXIT DISAS_TARGET_0
72 /* global register indexes */
73 static TCGv_ptr cpu_regwptr
;
74 static TCGv cpu_cc_src
, cpu_cc_src2
, cpu_cc_dst
;
75 static TCGv_i32 cpu_cc_op
;
76 static TCGv_i32 cpu_psr
;
77 static TCGv cpu_fsr
, cpu_pc
, cpu_npc
;
78 static TCGv cpu_regs
[32];
83 static TCGv_i32 cpu_xcc
, cpu_fprs
;
86 # define cpu_fprs ({ qemu_build_not_reached(); (TCGv)NULL; })
87 # define cpu_gsr ({ qemu_build_not_reached(); (TCGv)NULL; })
89 /* Floating point registers */
90 static TCGv_i64 cpu_fpr
[TARGET_DPREGS
];
92 #define env_field_offsetof(X) offsetof(CPUSPARCState, X)
94 # define env32_field_offsetof(X) ({ qemu_build_not_reached(); 0; })
95 # define env64_field_offsetof(X) env_field_offsetof(X)
97 # define env32_field_offsetof(X) env_field_offsetof(X)
98 # define env64_field_offsetof(X) ({ qemu_build_not_reached(); 0; })
101 typedef struct DisasDelayException
{
102 struct DisasDelayException
*next
;
105 /* Saved state at parent insn. */
108 } DisasDelayException
;
110 typedef struct DisasContext
{
111 DisasContextBase base
;
112 target_ulong pc
; /* current Program Counter: integer or DYNAMIC_PC */
113 target_ulong npc
; /* next PC: integer or DYNAMIC_PC or JUMP_PC */
114 target_ulong jump_pc
[2]; /* used when JUMP_PC pc value is used */
117 bool address_mask_32bit
;
118 #ifndef CONFIG_USER_ONLY
120 #ifdef TARGET_SPARC64
125 uint32_t cc_op
; /* current CC operation */
127 #ifdef TARGET_SPARC64
131 DisasDelayException
*delay_excp_list
;
140 // This function uses non-native bit order
141 #define GET_FIELD(X, FROM, TO) \
142 ((X) >> (31 - (TO)) & ((1 << ((TO) - (FROM) + 1)) - 1))
144 // This function uses the order in the manuals, i.e. bit 0 is 2^0
145 #define GET_FIELD_SP(X, FROM, TO) \
146 GET_FIELD(X, 31 - (TO), 31 - (FROM))
148 #define GET_FIELDs(x,a,b) sign_extend (GET_FIELD(x,a,b), (b) - (a) + 1)
149 #define GET_FIELD_SPs(x,a,b) sign_extend (GET_FIELD_SP(x,a,b), ((b) - (a) + 1))
151 #ifdef TARGET_SPARC64
152 #define DFPREG(r) (((r & 1) << 5) | (r & 0x1e))
153 #define QFPREG(r) (((r & 1) << 5) | (r & 0x1c))
155 #define DFPREG(r) (r & 0x1e)
156 #define QFPREG(r) (r & 0x1c)
159 #define UA2005_HTRAP_MASK 0xff
160 #define V8_TRAP_MASK 0x7f
/*
 * Sign-extend the low @len bits of @x to a full int.
 *
 * The left shift is performed on an unsigned copy of @x: left-shifting
 * a negative signed int is undefined behaviour in C, while the unsigned
 * shift is well defined.  The final arithmetic right shift (on the
 * re-signed value) replicates the sign bit back down.
 */
static int sign_extend(int x, int len)
{
    int shift = 32 - len;
    return (int)((uint32_t)x << shift) >> shift;
}
168 #define IS_IMM (insn & (1<<13))
170 static void gen_update_fprs_dirty(DisasContext
*dc
, int rd
)
172 #if defined(TARGET_SPARC64)
173 int bit
= (rd
< 32) ? 1 : 2;
174 /* If we know we've already set this bit within the TB,
175 we can avoid setting it again. */
176 if (!(dc
->fprs_dirty
& bit
)) {
177 dc
->fprs_dirty
|= bit
;
178 tcg_gen_ori_i32(cpu_fprs
, cpu_fprs
, bit
);
183 /* floating point registers moves */
184 static TCGv_i32
gen_load_fpr_F(DisasContext
*dc
, unsigned int src
)
186 TCGv_i32 ret
= tcg_temp_new_i32();
188 tcg_gen_extrl_i64_i32(ret
, cpu_fpr
[src
/ 2]);
190 tcg_gen_extrh_i64_i32(ret
, cpu_fpr
[src
/ 2]);
195 static void gen_store_fpr_F(DisasContext
*dc
, unsigned int dst
, TCGv_i32 v
)
197 TCGv_i64 t
= tcg_temp_new_i64();
199 tcg_gen_extu_i32_i64(t
, v
);
200 tcg_gen_deposit_i64(cpu_fpr
[dst
/ 2], cpu_fpr
[dst
/ 2], t
,
201 (dst
& 1 ? 0 : 32), 32);
202 gen_update_fprs_dirty(dc
, dst
);
205 static TCGv_i32
gen_dest_fpr_F(DisasContext
*dc
)
207 return tcg_temp_new_i32();
210 static TCGv_i64
gen_load_fpr_D(DisasContext
*dc
, unsigned int src
)
213 return cpu_fpr
[src
/ 2];
216 static void gen_store_fpr_D(DisasContext
*dc
, unsigned int dst
, TCGv_i64 v
)
219 tcg_gen_mov_i64(cpu_fpr
[dst
/ 2], v
);
220 gen_update_fprs_dirty(dc
, dst
);
223 static TCGv_i64
gen_dest_fpr_D(DisasContext
*dc
, unsigned int dst
)
225 return cpu_fpr
[DFPREG(dst
) / 2];
228 static void gen_op_load_fpr_QT0(unsigned int src
)
230 tcg_gen_st_i64(cpu_fpr
[src
/ 2], tcg_env
, offsetof(CPUSPARCState
, qt0
) +
231 offsetof(CPU_QuadU
, ll
.upper
));
232 tcg_gen_st_i64(cpu_fpr
[src
/2 + 1], tcg_env
, offsetof(CPUSPARCState
, qt0
) +
233 offsetof(CPU_QuadU
, ll
.lower
));
236 static void gen_op_load_fpr_QT1(unsigned int src
)
238 tcg_gen_st_i64(cpu_fpr
[src
/ 2], tcg_env
, offsetof(CPUSPARCState
, qt1
) +
239 offsetof(CPU_QuadU
, ll
.upper
));
240 tcg_gen_st_i64(cpu_fpr
[src
/2 + 1], tcg_env
, offsetof(CPUSPARCState
, qt1
) +
241 offsetof(CPU_QuadU
, ll
.lower
));
244 static void gen_op_store_QT0_fpr(unsigned int dst
)
246 tcg_gen_ld_i64(cpu_fpr
[dst
/ 2], tcg_env
, offsetof(CPUSPARCState
, qt0
) +
247 offsetof(CPU_QuadU
, ll
.upper
));
248 tcg_gen_ld_i64(cpu_fpr
[dst
/2 + 1], tcg_env
, offsetof(CPUSPARCState
, qt0
) +
249 offsetof(CPU_QuadU
, ll
.lower
));
252 static void gen_store_fpr_Q(DisasContext
*dc
, unsigned int dst
,
253 TCGv_i64 v1
, TCGv_i64 v2
)
257 tcg_gen_mov_i64(cpu_fpr
[dst
/ 2], v1
);
258 tcg_gen_mov_i64(cpu_fpr
[dst
/ 2 + 1], v2
);
259 gen_update_fprs_dirty(dc
, dst
);
262 #ifdef TARGET_SPARC64
263 static TCGv_i64
gen_load_fpr_Q0(DisasContext
*dc
, unsigned int src
)
266 return cpu_fpr
[src
/ 2];
269 static TCGv_i64
gen_load_fpr_Q1(DisasContext
*dc
, unsigned int src
)
272 return cpu_fpr
[src
/ 2 + 1];
275 static void gen_move_Q(DisasContext
*dc
, unsigned int rd
, unsigned int rs
)
280 tcg_gen_mov_i64(cpu_fpr
[rd
/ 2], cpu_fpr
[rs
/ 2]);
281 tcg_gen_mov_i64(cpu_fpr
[rd
/ 2 + 1], cpu_fpr
[rs
/ 2 + 1]);
282 gen_update_fprs_dirty(dc
, rd
);
287 #ifdef CONFIG_USER_ONLY
288 #define supervisor(dc) 0
289 #define hypervisor(dc) 0
291 #ifdef TARGET_SPARC64
292 #define hypervisor(dc) (dc->hypervisor)
293 #define supervisor(dc) (dc->supervisor | dc->hypervisor)
295 #define supervisor(dc) (dc->supervisor)
296 #define hypervisor(dc) 0
300 #if !defined(TARGET_SPARC64)
301 # define AM_CHECK(dc) false
302 #elif defined(TARGET_ABI32)
303 # define AM_CHECK(dc) true
304 #elif defined(CONFIG_USER_ONLY)
305 # define AM_CHECK(dc) false
307 # define AM_CHECK(dc) ((dc)->address_mask_32bit)
310 static void gen_address_mask(DisasContext
*dc
, TCGv addr
)
313 tcg_gen_andi_tl(addr
, addr
, 0xffffffffULL
);
317 static target_ulong
address_mask_i(DisasContext
*dc
, target_ulong addr
)
319 return AM_CHECK(dc
) ? (uint32_t)addr
: addr
;
322 static TCGv
gen_load_gpr(DisasContext
*dc
, int reg
)
326 return cpu_regs
[reg
];
328 TCGv t
= tcg_temp_new();
329 tcg_gen_movi_tl(t
, 0);
334 static void gen_store_gpr(DisasContext
*dc
, int reg
, TCGv v
)
338 tcg_gen_mov_tl(cpu_regs
[reg
], v
);
342 static TCGv
gen_dest_gpr(DisasContext
*dc
, int reg
)
346 return cpu_regs
[reg
];
348 return tcg_temp_new();
352 static bool use_goto_tb(DisasContext
*s
, target_ulong pc
, target_ulong npc
)
354 return translator_use_goto_tb(&s
->base
, pc
) &&
355 translator_use_goto_tb(&s
->base
, npc
);
358 static void gen_goto_tb(DisasContext
*s
, int tb_num
,
359 target_ulong pc
, target_ulong npc
)
361 if (use_goto_tb(s
, pc
, npc
)) {
362 /* jump to same page: we can use a direct jump */
363 tcg_gen_goto_tb(tb_num
);
364 tcg_gen_movi_tl(cpu_pc
, pc
);
365 tcg_gen_movi_tl(cpu_npc
, npc
);
366 tcg_gen_exit_tb(s
->base
.tb
, tb_num
);
368 /* jump to another page: we can use an indirect jump */
369 tcg_gen_movi_tl(cpu_pc
, pc
);
370 tcg_gen_movi_tl(cpu_npc
, npc
);
371 tcg_gen_lookup_and_goto_ptr();
376 static void gen_mov_reg_N(TCGv reg
, TCGv_i32 src
)
378 tcg_gen_extu_i32_tl(reg
, src
);
379 tcg_gen_extract_tl(reg
, reg
, PSR_NEG_SHIFT
, 1);
382 static void gen_mov_reg_Z(TCGv reg
, TCGv_i32 src
)
384 tcg_gen_extu_i32_tl(reg
, src
);
385 tcg_gen_extract_tl(reg
, reg
, PSR_ZERO_SHIFT
, 1);
388 static void gen_mov_reg_V(TCGv reg
, TCGv_i32 src
)
390 tcg_gen_extu_i32_tl(reg
, src
);
391 tcg_gen_extract_tl(reg
, reg
, PSR_OVF_SHIFT
, 1);
394 static void gen_mov_reg_C(TCGv reg
, TCGv_i32 src
)
396 tcg_gen_extu_i32_tl(reg
, src
);
397 tcg_gen_extract_tl(reg
, reg
, PSR_CARRY_SHIFT
, 1);
400 static void gen_op_add_cc(TCGv dst
, TCGv src1
, TCGv src2
)
402 tcg_gen_mov_tl(cpu_cc_src
, src1
);
403 tcg_gen_mov_tl(cpu_cc_src2
, src2
);
404 tcg_gen_add_tl(cpu_cc_dst
, cpu_cc_src
, cpu_cc_src2
);
405 tcg_gen_mov_tl(dst
, cpu_cc_dst
);
408 static TCGv_i32
gen_add32_carry32(void)
410 TCGv_i32 carry_32
, cc_src1_32
, cc_src2_32
;
412 /* Carry is computed from a previous add: (dst < src) */
413 #if TARGET_LONG_BITS == 64
414 cc_src1_32
= tcg_temp_new_i32();
415 cc_src2_32
= tcg_temp_new_i32();
416 tcg_gen_extrl_i64_i32(cc_src1_32
, cpu_cc_dst
);
417 tcg_gen_extrl_i64_i32(cc_src2_32
, cpu_cc_src
);
419 cc_src1_32
= cpu_cc_dst
;
420 cc_src2_32
= cpu_cc_src
;
423 carry_32
= tcg_temp_new_i32();
424 tcg_gen_setcond_i32(TCG_COND_LTU
, carry_32
, cc_src1_32
, cc_src2_32
);
429 static TCGv_i32
gen_sub32_carry32(void)
431 TCGv_i32 carry_32
, cc_src1_32
, cc_src2_32
;
433 /* Carry is computed from a previous borrow: (src1 < src2) */
434 #if TARGET_LONG_BITS == 64
435 cc_src1_32
= tcg_temp_new_i32();
436 cc_src2_32
= tcg_temp_new_i32();
437 tcg_gen_extrl_i64_i32(cc_src1_32
, cpu_cc_src
);
438 tcg_gen_extrl_i64_i32(cc_src2_32
, cpu_cc_src2
);
440 cc_src1_32
= cpu_cc_src
;
441 cc_src2_32
= cpu_cc_src2
;
444 carry_32
= tcg_temp_new_i32();
445 tcg_gen_setcond_i32(TCG_COND_LTU
, carry_32
, cc_src1_32
, cc_src2_32
);
450 static void gen_op_addc_int(TCGv dst
, TCGv src1
, TCGv src2
,
451 TCGv_i32 carry_32
, bool update_cc
)
453 tcg_gen_add_tl(dst
, src1
, src2
);
455 #ifdef TARGET_SPARC64
456 TCGv carry
= tcg_temp_new();
457 tcg_gen_extu_i32_tl(carry
, carry_32
);
458 tcg_gen_add_tl(dst
, dst
, carry
);
460 tcg_gen_add_i32(dst
, dst
, carry_32
);
464 tcg_debug_assert(dst
== cpu_cc_dst
);
465 tcg_gen_mov_tl(cpu_cc_src
, src1
);
466 tcg_gen_mov_tl(cpu_cc_src2
, src2
);
470 static void gen_op_addc_int_add(TCGv dst
, TCGv src1
, TCGv src2
, bool update_cc
)
474 if (TARGET_LONG_BITS
== 64) {
475 gen_op_addc_int(dst
, src1
, src2
, gen_add32_carry32(), update_cc
);
480 * We can re-use the host's hardware carry generation by using
481 * an ADD2 opcode. We discard the low part of the output.
482 * Ideally we'd combine this operation with the add that
483 * generated the carry in the first place.
485 discard
= tcg_temp_new();
486 tcg_gen_add2_tl(discard
, dst
, cpu_cc_src
, src1
, cpu_cc_src2
, src2
);
489 tcg_debug_assert(dst
== cpu_cc_dst
);
490 tcg_gen_mov_tl(cpu_cc_src
, src1
);
491 tcg_gen_mov_tl(cpu_cc_src2
, src2
);
495 static void gen_op_addc_add(TCGv dst
, TCGv src1
, TCGv src2
)
497 gen_op_addc_int_add(dst
, src1
, src2
, false);
500 static void gen_op_addccc_add(TCGv dst
, TCGv src1
, TCGv src2
)
502 gen_op_addc_int_add(dst
, src1
, src2
, true);
505 static void gen_op_addc_sub(TCGv dst
, TCGv src1
, TCGv src2
)
507 gen_op_addc_int(dst
, src1
, src2
, gen_sub32_carry32(), false);
510 static void gen_op_addccc_sub(TCGv dst
, TCGv src1
, TCGv src2
)
512 gen_op_addc_int(dst
, src1
, src2
, gen_sub32_carry32(), true);
515 static void gen_op_addc_int_generic(TCGv dst
, TCGv src1
, TCGv src2
,
518 TCGv_i32 carry_32
= tcg_temp_new_i32();
519 gen_helper_compute_C_icc(carry_32
, tcg_env
);
520 gen_op_addc_int(dst
, src1
, src2
, carry_32
, update_cc
);
523 static void gen_op_addc_generic(TCGv dst
, TCGv src1
, TCGv src2
)
525 gen_op_addc_int_generic(dst
, src1
, src2
, false);
528 static void gen_op_addccc_generic(TCGv dst
, TCGv src1
, TCGv src2
)
530 gen_op_addc_int_generic(dst
, src1
, src2
, true);
533 static void gen_op_sub_cc(TCGv dst
, TCGv src1
, TCGv src2
)
535 tcg_gen_mov_tl(cpu_cc_src
, src1
);
536 tcg_gen_mov_tl(cpu_cc_src2
, src2
);
537 tcg_gen_sub_tl(cpu_cc_dst
, cpu_cc_src
, cpu_cc_src2
);
538 tcg_gen_mov_tl(dst
, cpu_cc_dst
);
541 static void gen_op_subx_int(DisasContext
*dc
, TCGv dst
, TCGv src1
,
542 TCGv src2
, int update_cc
)
550 /* Carry is known to be zero. Fall back to plain SUB. */
552 gen_op_sub_cc(dst
, src1
, src2
);
554 tcg_gen_sub_tl(dst
, src1
, src2
);
561 carry_32
= gen_add32_carry32();
567 if (TARGET_LONG_BITS
== 32) {
568 /* We can re-use the host's hardware carry generation by using
569 a SUB2 opcode. We discard the low part of the output.
570 Ideally we'd combine this operation with the add that
571 generated the carry in the first place. */
572 carry
= tcg_temp_new();
573 tcg_gen_sub2_tl(carry
, dst
, cpu_cc_src
, src1
, cpu_cc_src2
, src2
);
576 carry_32
= gen_sub32_carry32();
580 /* We need external help to produce the carry. */
581 carry_32
= tcg_temp_new_i32();
582 gen_helper_compute_C_icc(carry_32
, tcg_env
);
586 #if TARGET_LONG_BITS == 64
587 carry
= tcg_temp_new();
588 tcg_gen_extu_i32_i64(carry
, carry_32
);
593 tcg_gen_sub_tl(dst
, src1
, src2
);
594 tcg_gen_sub_tl(dst
, dst
, carry
);
598 tcg_gen_mov_tl(cpu_cc_src
, src1
);
599 tcg_gen_mov_tl(cpu_cc_src2
, src2
);
600 tcg_gen_mov_tl(cpu_cc_dst
, dst
);
601 tcg_gen_movi_i32(cpu_cc_op
, CC_OP_SUBX
);
602 dc
->cc_op
= CC_OP_SUBX
;
606 static void gen_op_mulscc(TCGv dst
, TCGv src1
, TCGv src2
)
608 TCGv r_temp
, zero
, t0
;
610 r_temp
= tcg_temp_new();
617 zero
= tcg_constant_tl(0);
618 tcg_gen_andi_tl(cpu_cc_src
, src1
, 0xffffffff);
619 tcg_gen_andi_tl(r_temp
, cpu_y
, 0x1);
620 tcg_gen_andi_tl(cpu_cc_src2
, src2
, 0xffffffff);
621 tcg_gen_movcond_tl(TCG_COND_EQ
, cpu_cc_src2
, r_temp
, zero
,
625 // env->y = (b2 << 31) | (env->y >> 1);
626 tcg_gen_extract_tl(t0
, cpu_y
, 1, 31);
627 tcg_gen_deposit_tl(cpu_y
, t0
, cpu_cc_src
, 31, 1);
630 gen_mov_reg_N(t0
, cpu_psr
);
631 gen_mov_reg_V(r_temp
, cpu_psr
);
632 tcg_gen_xor_tl(t0
, t0
, r_temp
);
634 // T0 = (b1 << 31) | (T0 >> 1);
636 tcg_gen_shli_tl(t0
, t0
, 31);
637 tcg_gen_shri_tl(cpu_cc_src
, cpu_cc_src
, 1);
638 tcg_gen_or_tl(cpu_cc_src
, cpu_cc_src
, t0
);
640 tcg_gen_add_tl(cpu_cc_dst
, cpu_cc_src
, cpu_cc_src2
);
642 tcg_gen_mov_tl(dst
, cpu_cc_dst
);
645 static void gen_op_multiply(TCGv dst
, TCGv src1
, TCGv src2
, int sign_ext
)
647 #if TARGET_LONG_BITS == 32
649 tcg_gen_muls2_tl(dst
, cpu_y
, src1
, src2
);
651 tcg_gen_mulu2_tl(dst
, cpu_y
, src1
, src2
);
654 TCGv t0
= tcg_temp_new_i64();
655 TCGv t1
= tcg_temp_new_i64();
658 tcg_gen_ext32s_i64(t0
, src1
);
659 tcg_gen_ext32s_i64(t1
, src2
);
661 tcg_gen_ext32u_i64(t0
, src1
);
662 tcg_gen_ext32u_i64(t1
, src2
);
665 tcg_gen_mul_i64(dst
, t0
, t1
);
666 tcg_gen_shri_i64(cpu_y
, dst
, 32);
670 static void gen_op_umul(TCGv dst
, TCGv src1
, TCGv src2
)
672 /* zero-extend truncated operands before multiplication */
673 gen_op_multiply(dst
, src1
, src2
, 0);
676 static void gen_op_smul(TCGv dst
, TCGv src1
, TCGv src2
)
678 /* sign-extend truncated operands before multiplication */
679 gen_op_multiply(dst
, src1
, src2
, 1);
683 static void gen_op_eval_ba(TCGv dst
)
685 tcg_gen_movi_tl(dst
, 1);
689 static void gen_op_eval_be(TCGv dst
, TCGv_i32 src
)
691 gen_mov_reg_Z(dst
, src
);
695 static void gen_op_eval_ble(TCGv dst
, TCGv_i32 src
)
697 TCGv t0
= tcg_temp_new();
698 gen_mov_reg_N(t0
, src
);
699 gen_mov_reg_V(dst
, src
);
700 tcg_gen_xor_tl(dst
, dst
, t0
);
701 gen_mov_reg_Z(t0
, src
);
702 tcg_gen_or_tl(dst
, dst
, t0
);
706 static void gen_op_eval_bl(TCGv dst
, TCGv_i32 src
)
708 TCGv t0
= tcg_temp_new();
709 gen_mov_reg_V(t0
, src
);
710 gen_mov_reg_N(dst
, src
);
711 tcg_gen_xor_tl(dst
, dst
, t0
);
715 static void gen_op_eval_bleu(TCGv dst
, TCGv_i32 src
)
717 TCGv t0
= tcg_temp_new();
718 gen_mov_reg_Z(t0
, src
);
719 gen_mov_reg_C(dst
, src
);
720 tcg_gen_or_tl(dst
, dst
, t0
);
724 static void gen_op_eval_bcs(TCGv dst
, TCGv_i32 src
)
726 gen_mov_reg_C(dst
, src
);
730 static void gen_op_eval_bvs(TCGv dst
, TCGv_i32 src
)
732 gen_mov_reg_V(dst
, src
);
736 static void gen_op_eval_bn(TCGv dst
)
738 tcg_gen_movi_tl(dst
, 0);
742 static void gen_op_eval_bneg(TCGv dst
, TCGv_i32 src
)
744 gen_mov_reg_N(dst
, src
);
748 static void gen_op_eval_bne(TCGv dst
, TCGv_i32 src
)
750 gen_mov_reg_Z(dst
, src
);
751 tcg_gen_xori_tl(dst
, dst
, 0x1);
755 static void gen_op_eval_bg(TCGv dst
, TCGv_i32 src
)
757 gen_op_eval_ble(dst
, src
);
758 tcg_gen_xori_tl(dst
, dst
, 0x1);
762 static void gen_op_eval_bge(TCGv dst
, TCGv_i32 src
)
764 gen_op_eval_bl(dst
, src
);
765 tcg_gen_xori_tl(dst
, dst
, 0x1);
769 static void gen_op_eval_bgu(TCGv dst
, TCGv_i32 src
)
771 gen_op_eval_bleu(dst
, src
);
772 tcg_gen_xori_tl(dst
, dst
, 0x1);
776 static void gen_op_eval_bcc(TCGv dst
, TCGv_i32 src
)
778 gen_mov_reg_C(dst
, src
);
779 tcg_gen_xori_tl(dst
, dst
, 0x1);
783 static void gen_op_eval_bpos(TCGv dst
, TCGv_i32 src
)
785 gen_mov_reg_N(dst
, src
);
786 tcg_gen_xori_tl(dst
, dst
, 0x1);
790 static void gen_op_eval_bvc(TCGv dst
, TCGv_i32 src
)
792 gen_mov_reg_V(dst
, src
);
793 tcg_gen_xori_tl(dst
, dst
, 0x1);
797 FPSR bit field FCC1 | FCC0:
803 static void gen_mov_reg_FCC0(TCGv reg
, TCGv src
,
804 unsigned int fcc_offset
)
806 tcg_gen_shri_tl(reg
, src
, FSR_FCC0_SHIFT
+ fcc_offset
);
807 tcg_gen_andi_tl(reg
, reg
, 0x1);
810 static void gen_mov_reg_FCC1(TCGv reg
, TCGv src
, unsigned int fcc_offset
)
812 tcg_gen_shri_tl(reg
, src
, FSR_FCC1_SHIFT
+ fcc_offset
);
813 tcg_gen_andi_tl(reg
, reg
, 0x1);
817 static void gen_op_eval_fbne(TCGv dst
, TCGv src
, unsigned int fcc_offset
)
819 TCGv t0
= tcg_temp_new();
820 gen_mov_reg_FCC0(dst
, src
, fcc_offset
);
821 gen_mov_reg_FCC1(t0
, src
, fcc_offset
);
822 tcg_gen_or_tl(dst
, dst
, t0
);
825 // 1 or 2: FCC0 ^ FCC1
826 static void gen_op_eval_fblg(TCGv dst
, TCGv src
, unsigned int fcc_offset
)
828 TCGv t0
= tcg_temp_new();
829 gen_mov_reg_FCC0(dst
, src
, fcc_offset
);
830 gen_mov_reg_FCC1(t0
, src
, fcc_offset
);
831 tcg_gen_xor_tl(dst
, dst
, t0
);
835 static void gen_op_eval_fbul(TCGv dst
, TCGv src
, unsigned int fcc_offset
)
837 gen_mov_reg_FCC0(dst
, src
, fcc_offset
);
841 static void gen_op_eval_fbl(TCGv dst
, TCGv src
, unsigned int fcc_offset
)
843 TCGv t0
= tcg_temp_new();
844 gen_mov_reg_FCC0(dst
, src
, fcc_offset
);
845 gen_mov_reg_FCC1(t0
, src
, fcc_offset
);
846 tcg_gen_andc_tl(dst
, dst
, t0
);
850 static void gen_op_eval_fbug(TCGv dst
, TCGv src
, unsigned int fcc_offset
)
852 gen_mov_reg_FCC1(dst
, src
, fcc_offset
);
856 static void gen_op_eval_fbg(TCGv dst
, TCGv src
, unsigned int fcc_offset
)
858 TCGv t0
= tcg_temp_new();
859 gen_mov_reg_FCC0(dst
, src
, fcc_offset
);
860 gen_mov_reg_FCC1(t0
, src
, fcc_offset
);
861 tcg_gen_andc_tl(dst
, t0
, dst
);
865 static void gen_op_eval_fbu(TCGv dst
, TCGv src
, unsigned int fcc_offset
)
867 TCGv t0
= tcg_temp_new();
868 gen_mov_reg_FCC0(dst
, src
, fcc_offset
);
869 gen_mov_reg_FCC1(t0
, src
, fcc_offset
);
870 tcg_gen_and_tl(dst
, dst
, t0
);
874 static void gen_op_eval_fbe(TCGv dst
, TCGv src
, unsigned int fcc_offset
)
876 TCGv t0
= tcg_temp_new();
877 gen_mov_reg_FCC0(dst
, src
, fcc_offset
);
878 gen_mov_reg_FCC1(t0
, src
, fcc_offset
);
879 tcg_gen_or_tl(dst
, dst
, t0
);
880 tcg_gen_xori_tl(dst
, dst
, 0x1);
883 // 0 or 3: !(FCC0 ^ FCC1)
884 static void gen_op_eval_fbue(TCGv dst
, TCGv src
, unsigned int fcc_offset
)
886 TCGv t0
= tcg_temp_new();
887 gen_mov_reg_FCC0(dst
, src
, fcc_offset
);
888 gen_mov_reg_FCC1(t0
, src
, fcc_offset
);
889 tcg_gen_xor_tl(dst
, dst
, t0
);
890 tcg_gen_xori_tl(dst
, dst
, 0x1);
894 static void gen_op_eval_fbge(TCGv dst
, TCGv src
, unsigned int fcc_offset
)
896 gen_mov_reg_FCC0(dst
, src
, fcc_offset
);
897 tcg_gen_xori_tl(dst
, dst
, 0x1);
900 // !1: !(FCC0 & !FCC1)
901 static void gen_op_eval_fbuge(TCGv dst
, TCGv src
, unsigned int fcc_offset
)
903 TCGv t0
= tcg_temp_new();
904 gen_mov_reg_FCC0(dst
, src
, fcc_offset
);
905 gen_mov_reg_FCC1(t0
, src
, fcc_offset
);
906 tcg_gen_andc_tl(dst
, dst
, t0
);
907 tcg_gen_xori_tl(dst
, dst
, 0x1);
911 static void gen_op_eval_fble(TCGv dst
, TCGv src
, unsigned int fcc_offset
)
913 gen_mov_reg_FCC1(dst
, src
, fcc_offset
);
914 tcg_gen_xori_tl(dst
, dst
, 0x1);
917 // !2: !(!FCC0 & FCC1)
918 static void gen_op_eval_fbule(TCGv dst
, TCGv src
, unsigned int fcc_offset
)
920 TCGv t0
= tcg_temp_new();
921 gen_mov_reg_FCC0(dst
, src
, fcc_offset
);
922 gen_mov_reg_FCC1(t0
, src
, fcc_offset
);
923 tcg_gen_andc_tl(dst
, t0
, dst
);
924 tcg_gen_xori_tl(dst
, dst
, 0x1);
927 // !3: !(FCC0 & FCC1)
928 static void gen_op_eval_fbo(TCGv dst
, TCGv src
, unsigned int fcc_offset
)
930 TCGv t0
= tcg_temp_new();
931 gen_mov_reg_FCC0(dst
, src
, fcc_offset
);
932 gen_mov_reg_FCC1(t0
, src
, fcc_offset
);
933 tcg_gen_and_tl(dst
, dst
, t0
);
934 tcg_gen_xori_tl(dst
, dst
, 0x1);
937 static void gen_branch2(DisasContext
*dc
, target_ulong pc1
,
938 target_ulong pc2
, TCGv r_cond
)
940 TCGLabel
*l1
= gen_new_label();
942 tcg_gen_brcondi_tl(TCG_COND_EQ
, r_cond
, 0, l1
);
944 gen_goto_tb(dc
, 0, pc1
, pc1
+ 4);
947 gen_goto_tb(dc
, 1, pc2
, pc2
+ 4);
950 static void gen_generic_branch(DisasContext
*dc
)
952 TCGv npc0
= tcg_constant_tl(dc
->jump_pc
[0]);
953 TCGv npc1
= tcg_constant_tl(dc
->jump_pc
[1]);
954 TCGv zero
= tcg_constant_tl(0);
956 tcg_gen_movcond_tl(TCG_COND_NE
, cpu_npc
, cpu_cond
, zero
, npc0
, npc1
);
959 /* call this function before using the condition register as it may
960 have been set for a jump */
961 static void flush_cond(DisasContext
*dc
)
963 if (dc
->npc
== JUMP_PC
) {
964 gen_generic_branch(dc
);
965 dc
->npc
= DYNAMIC_PC_LOOKUP
;
969 static void save_npc(DisasContext
*dc
)
974 gen_generic_branch(dc
);
975 dc
->npc
= DYNAMIC_PC_LOOKUP
;
978 case DYNAMIC_PC_LOOKUP
:
981 g_assert_not_reached();
984 tcg_gen_movi_tl(cpu_npc
, dc
->npc
);
988 static void update_psr(DisasContext
*dc
)
990 if (dc
->cc_op
!= CC_OP_FLAGS
) {
991 dc
->cc_op
= CC_OP_FLAGS
;
992 gen_helper_compute_psr(tcg_env
);
996 static void save_state(DisasContext
*dc
)
998 tcg_gen_movi_tl(cpu_pc
, dc
->pc
);
1002 static void gen_exception(DisasContext
*dc
, int which
)
1005 gen_helper_raise_exception(tcg_env
, tcg_constant_i32(which
));
1006 dc
->base
.is_jmp
= DISAS_NORETURN
;
1009 static TCGLabel
*delay_exceptionv(DisasContext
*dc
, TCGv_i32 excp
)
1011 DisasDelayException
*e
= g_new0(DisasDelayException
, 1);
1013 e
->next
= dc
->delay_excp_list
;
1014 dc
->delay_excp_list
= e
;
1016 e
->lab
= gen_new_label();
1019 /* Caller must have used flush_cond before branch. */
1020 assert(e
->npc
!= JUMP_PC
);
1026 static TCGLabel
*delay_exception(DisasContext
*dc
, int excp
)
1028 return delay_exceptionv(dc
, tcg_constant_i32(excp
));
1031 static void gen_check_align(DisasContext
*dc
, TCGv addr
, int mask
)
1033 TCGv t
= tcg_temp_new();
1036 tcg_gen_andi_tl(t
, addr
, mask
);
1039 lab
= delay_exception(dc
, TT_UNALIGNED
);
1040 tcg_gen_brcondi_tl(TCG_COND_NE
, t
, 0, lab
);
1043 static void gen_mov_pc_npc(DisasContext
*dc
)
1048 gen_generic_branch(dc
);
1049 tcg_gen_mov_tl(cpu_pc
, cpu_npc
);
1050 dc
->pc
= DYNAMIC_PC_LOOKUP
;
1053 case DYNAMIC_PC_LOOKUP
:
1054 tcg_gen_mov_tl(cpu_pc
, cpu_npc
);
1058 g_assert_not_reached();
1065 static void gen_op_next_insn(void)
1067 tcg_gen_mov_tl(cpu_pc
, cpu_npc
);
1068 tcg_gen_addi_tl(cpu_npc
, cpu_npc
, 4);
1071 static void gen_compare(DisasCompare
*cmp
, bool xcc
, unsigned int cond
,
1074 static int subcc_cond
[16] = {
1090 -1, /* no overflow */
1093 static int logic_cond
[16] = {
1095 TCG_COND_EQ
, /* eq: Z */
1096 TCG_COND_LE
, /* le: Z | (N ^ V) -> Z | N */
1097 TCG_COND_LT
, /* lt: N ^ V -> N */
1098 TCG_COND_EQ
, /* leu: C | Z -> Z */
1099 TCG_COND_NEVER
, /* ltu: C -> 0 */
1100 TCG_COND_LT
, /* neg: N */
1101 TCG_COND_NEVER
, /* vs: V -> 0 */
1103 TCG_COND_NE
, /* ne: !Z */
1104 TCG_COND_GT
, /* gt: !(Z | (N ^ V)) -> !(Z | N) */
1105 TCG_COND_GE
, /* ge: !(N ^ V) -> !N */
1106 TCG_COND_NE
, /* gtu: !(C | Z) -> !Z */
1107 TCG_COND_ALWAYS
, /* geu: !C -> 1 */
1108 TCG_COND_GE
, /* pos: !N */
1109 TCG_COND_ALWAYS
, /* vc: !V -> 1 */
1115 #ifdef TARGET_SPARC64
1125 switch (dc
->cc_op
) {
1127 cmp
->cond
= logic_cond
[cond
];
1129 cmp
->is_bool
= false;
1130 cmp
->c2
= tcg_constant_tl(0);
1131 #ifdef TARGET_SPARC64
1133 cmp
->c1
= tcg_temp_new();
1134 tcg_gen_ext32s_tl(cmp
->c1
, cpu_cc_dst
);
1138 cmp
->c1
= cpu_cc_dst
;
1145 cmp
->cond
= (cond
== 6 ? TCG_COND_LT
: TCG_COND_GE
);
1146 goto do_compare_dst_0
;
1148 case 7: /* overflow */
1149 case 15: /* !overflow */
1153 cmp
->cond
= subcc_cond
[cond
];
1154 cmp
->is_bool
= false;
1155 #ifdef TARGET_SPARC64
1157 /* Note that sign-extension works for unsigned compares as
1158 long as both operands are sign-extended. */
1159 cmp
->c1
= tcg_temp_new();
1160 cmp
->c2
= tcg_temp_new();
1161 tcg_gen_ext32s_tl(cmp
->c1
, cpu_cc_src
);
1162 tcg_gen_ext32s_tl(cmp
->c2
, cpu_cc_src2
);
1166 cmp
->c1
= cpu_cc_src
;
1167 cmp
->c2
= cpu_cc_src2
;
1174 gen_helper_compute_psr(tcg_env
);
1175 dc
->cc_op
= CC_OP_FLAGS
;
1179 /* We're going to generate a boolean result. */
1180 cmp
->cond
= TCG_COND_NE
;
1181 cmp
->is_bool
= true;
1182 cmp
->c1
= r_dst
= tcg_temp_new();
1183 cmp
->c2
= tcg_constant_tl(0);
1187 gen_op_eval_bn(r_dst
);
1190 gen_op_eval_be(r_dst
, r_src
);
1193 gen_op_eval_ble(r_dst
, r_src
);
1196 gen_op_eval_bl(r_dst
, r_src
);
1199 gen_op_eval_bleu(r_dst
, r_src
);
1202 gen_op_eval_bcs(r_dst
, r_src
);
1205 gen_op_eval_bneg(r_dst
, r_src
);
1208 gen_op_eval_bvs(r_dst
, r_src
);
1211 gen_op_eval_ba(r_dst
);
1214 gen_op_eval_bne(r_dst
, r_src
);
1217 gen_op_eval_bg(r_dst
, r_src
);
1220 gen_op_eval_bge(r_dst
, r_src
);
1223 gen_op_eval_bgu(r_dst
, r_src
);
1226 gen_op_eval_bcc(r_dst
, r_src
);
1229 gen_op_eval_bpos(r_dst
, r_src
);
1232 gen_op_eval_bvc(r_dst
, r_src
);
1239 static void gen_fcompare(DisasCompare
*cmp
, unsigned int cc
, unsigned int cond
)
1241 unsigned int offset
;
1244 /* For now we still generate a straight boolean result. */
1245 cmp
->cond
= TCG_COND_NE
;
1246 cmp
->is_bool
= true;
1247 cmp
->c1
= r_dst
= tcg_temp_new();
1248 cmp
->c2
= tcg_constant_tl(0);
1268 gen_op_eval_bn(r_dst
);
1271 gen_op_eval_fbne(r_dst
, cpu_fsr
, offset
);
1274 gen_op_eval_fblg(r_dst
, cpu_fsr
, offset
);
1277 gen_op_eval_fbul(r_dst
, cpu_fsr
, offset
);
1280 gen_op_eval_fbl(r_dst
, cpu_fsr
, offset
);
1283 gen_op_eval_fbug(r_dst
, cpu_fsr
, offset
);
1286 gen_op_eval_fbg(r_dst
, cpu_fsr
, offset
);
1289 gen_op_eval_fbu(r_dst
, cpu_fsr
, offset
);
1292 gen_op_eval_ba(r_dst
);
1295 gen_op_eval_fbe(r_dst
, cpu_fsr
, offset
);
1298 gen_op_eval_fbue(r_dst
, cpu_fsr
, offset
);
1301 gen_op_eval_fbge(r_dst
, cpu_fsr
, offset
);
1304 gen_op_eval_fbuge(r_dst
, cpu_fsr
, offset
);
1307 gen_op_eval_fble(r_dst
, cpu_fsr
, offset
);
1310 gen_op_eval_fbule(r_dst
, cpu_fsr
, offset
);
1313 gen_op_eval_fbo(r_dst
, cpu_fsr
, offset
);
1319 static const TCGCond gen_tcg_cond_reg
[8] = {
1320 TCG_COND_NEVER
, /* reserved */
1324 TCG_COND_NEVER
, /* reserved */
1330 static void gen_compare_reg(DisasCompare
*cmp
, int cond
, TCGv r_src
)
1332 cmp
->cond
= tcg_invert_cond(gen_tcg_cond_reg
[cond
]);
1333 cmp
->is_bool
= false;
1335 cmp
->c2
= tcg_constant_tl(0);
1338 #ifdef TARGET_SPARC64
1339 static void gen_op_fcmps(int fccno
, TCGv_i32 r_rs1
, TCGv_i32 r_rs2
)
1343 gen_helper_fcmps(cpu_fsr
, tcg_env
, r_rs1
, r_rs2
);
1346 gen_helper_fcmps_fcc1(cpu_fsr
, tcg_env
, r_rs1
, r_rs2
);
1349 gen_helper_fcmps_fcc2(cpu_fsr
, tcg_env
, r_rs1
, r_rs2
);
1352 gen_helper_fcmps_fcc3(cpu_fsr
, tcg_env
, r_rs1
, r_rs2
);
1357 static void gen_op_fcmpd(int fccno
, TCGv_i64 r_rs1
, TCGv_i64 r_rs2
)
1361 gen_helper_fcmpd(cpu_fsr
, tcg_env
, r_rs1
, r_rs2
);
1364 gen_helper_fcmpd_fcc1(cpu_fsr
, tcg_env
, r_rs1
, r_rs2
);
1367 gen_helper_fcmpd_fcc2(cpu_fsr
, tcg_env
, r_rs1
, r_rs2
);
1370 gen_helper_fcmpd_fcc3(cpu_fsr
, tcg_env
, r_rs1
, r_rs2
);
1375 static void gen_op_fcmpq(int fccno
)
1379 gen_helper_fcmpq(cpu_fsr
, tcg_env
);
1382 gen_helper_fcmpq_fcc1(cpu_fsr
, tcg_env
);
1385 gen_helper_fcmpq_fcc2(cpu_fsr
, tcg_env
);
1388 gen_helper_fcmpq_fcc3(cpu_fsr
, tcg_env
);
1393 static void gen_op_fcmpes(int fccno
, TCGv_i32 r_rs1
, TCGv_i32 r_rs2
)
1397 gen_helper_fcmpes(cpu_fsr
, tcg_env
, r_rs1
, r_rs2
);
1400 gen_helper_fcmpes_fcc1(cpu_fsr
, tcg_env
, r_rs1
, r_rs2
);
1403 gen_helper_fcmpes_fcc2(cpu_fsr
, tcg_env
, r_rs1
, r_rs2
);
1406 gen_helper_fcmpes_fcc3(cpu_fsr
, tcg_env
, r_rs1
, r_rs2
);
1411 static void gen_op_fcmped(int fccno
, TCGv_i64 r_rs1
, TCGv_i64 r_rs2
)
1415 gen_helper_fcmped(cpu_fsr
, tcg_env
, r_rs1
, r_rs2
);
1418 gen_helper_fcmped_fcc1(cpu_fsr
, tcg_env
, r_rs1
, r_rs2
);
1421 gen_helper_fcmped_fcc2(cpu_fsr
, tcg_env
, r_rs1
, r_rs2
);
1424 gen_helper_fcmped_fcc3(cpu_fsr
, tcg_env
, r_rs1
, r_rs2
);
1429 static void gen_op_fcmpeq(int fccno
)
1433 gen_helper_fcmpeq(cpu_fsr
, tcg_env
);
1436 gen_helper_fcmpeq_fcc1(cpu_fsr
, tcg_env
);
1439 gen_helper_fcmpeq_fcc2(cpu_fsr
, tcg_env
);
1442 gen_helper_fcmpeq_fcc3(cpu_fsr
, tcg_env
);
1449 static void gen_op_fcmps(int fccno
, TCGv r_rs1
, TCGv r_rs2
)
1451 gen_helper_fcmps(cpu_fsr
, tcg_env
, r_rs1
, r_rs2
);
1454 static void gen_op_fcmpd(int fccno
, TCGv_i64 r_rs1
, TCGv_i64 r_rs2
)
1456 gen_helper_fcmpd(cpu_fsr
, tcg_env
, r_rs1
, r_rs2
);
1459 static void gen_op_fcmpq(int fccno
)
1461 gen_helper_fcmpq(cpu_fsr
, tcg_env
);
1464 static void gen_op_fcmpes(int fccno
, TCGv r_rs1
, TCGv r_rs2
)
1466 gen_helper_fcmpes(cpu_fsr
, tcg_env
, r_rs1
, r_rs2
);
1469 static void gen_op_fcmped(int fccno
, TCGv_i64 r_rs1
, TCGv_i64 r_rs2
)
1471 gen_helper_fcmped(cpu_fsr
, tcg_env
, r_rs1
, r_rs2
);
1474 static void gen_op_fcmpeq(int fccno
)
1476 gen_helper_fcmpeq(cpu_fsr
, tcg_env
);
1480 static void gen_op_fpexception_im(DisasContext
*dc
, int fsr_flags
)
1482 tcg_gen_andi_tl(cpu_fsr
, cpu_fsr
, FSR_FTT_NMASK
);
1483 tcg_gen_ori_tl(cpu_fsr
, cpu_fsr
, fsr_flags
);
1484 gen_exception(dc
, TT_FP_EXCP
);
1487 static int gen_trap_ifnofpu(DisasContext
*dc
)
1489 #if !defined(CONFIG_USER_ONLY)
1490 if (!dc
->fpu_enabled
) {
1491 gen_exception(dc
, TT_NFPU_INSN
);
1498 static void gen_op_clear_ieee_excp_and_FTT(void)
1500 tcg_gen_andi_tl(cpu_fsr
, cpu_fsr
, FSR_FTT_CEXC_NMASK
);
1503 static void gen_fop_FF(DisasContext
*dc
, int rd
, int rs
,
1504 void (*gen
)(TCGv_i32
, TCGv_ptr
, TCGv_i32
))
1508 src
= gen_load_fpr_F(dc
, rs
);
1509 dst
= gen_dest_fpr_F(dc
);
1511 gen(dst
, tcg_env
, src
);
1512 gen_helper_check_ieee_exceptions(cpu_fsr
, tcg_env
);
1514 gen_store_fpr_F(dc
, rd
, dst
);
1517 static void gen_ne_fop_FF(DisasContext
*dc
, int rd
, int rs
,
1518 void (*gen
)(TCGv_i32
, TCGv_i32
))
1522 src
= gen_load_fpr_F(dc
, rs
);
1523 dst
= gen_dest_fpr_F(dc
);
1527 gen_store_fpr_F(dc
, rd
, dst
);
1530 static void gen_fop_FFF(DisasContext
*dc
, int rd
, int rs1
, int rs2
,
1531 void (*gen
)(TCGv_i32
, TCGv_ptr
, TCGv_i32
, TCGv_i32
))
1533 TCGv_i32 dst
, src1
, src2
;
1535 src1
= gen_load_fpr_F(dc
, rs1
);
1536 src2
= gen_load_fpr_F(dc
, rs2
);
1537 dst
= gen_dest_fpr_F(dc
);
1539 gen(dst
, tcg_env
, src1
, src2
);
1540 gen_helper_check_ieee_exceptions(cpu_fsr
, tcg_env
);
1542 gen_store_fpr_F(dc
, rd
, dst
);
1545 #ifdef TARGET_SPARC64
1546 static void gen_ne_fop_FFF(DisasContext
*dc
, int rd
, int rs1
, int rs2
,
1547 void (*gen
)(TCGv_i32
, TCGv_i32
, TCGv_i32
))
1549 TCGv_i32 dst
, src1
, src2
;
1551 src1
= gen_load_fpr_F(dc
, rs1
);
1552 src2
= gen_load_fpr_F(dc
, rs2
);
1553 dst
= gen_dest_fpr_F(dc
);
1555 gen(dst
, src1
, src2
);
1557 gen_store_fpr_F(dc
, rd
, dst
);
1561 static void gen_fop_DD(DisasContext
*dc
, int rd
, int rs
,
1562 void (*gen
)(TCGv_i64
, TCGv_ptr
, TCGv_i64
))
1566 src
= gen_load_fpr_D(dc
, rs
);
1567 dst
= gen_dest_fpr_D(dc
, rd
);
1569 gen(dst
, tcg_env
, src
);
1570 gen_helper_check_ieee_exceptions(cpu_fsr
, tcg_env
);
1572 gen_store_fpr_D(dc
, rd
, dst
);
1575 #ifdef TARGET_SPARC64
1576 static void gen_ne_fop_DD(DisasContext
*dc
, int rd
, int rs
,
1577 void (*gen
)(TCGv_i64
, TCGv_i64
))
1581 src
= gen_load_fpr_D(dc
, rs
);
1582 dst
= gen_dest_fpr_D(dc
, rd
);
1586 gen_store_fpr_D(dc
, rd
, dst
);
1590 static void gen_fop_DDD(DisasContext
*dc
, int rd
, int rs1
, int rs2
,
1591 void (*gen
)(TCGv_i64
, TCGv_ptr
, TCGv_i64
, TCGv_i64
))
1593 TCGv_i64 dst
, src1
, src2
;
1595 src1
= gen_load_fpr_D(dc
, rs1
);
1596 src2
= gen_load_fpr_D(dc
, rs2
);
1597 dst
= gen_dest_fpr_D(dc
, rd
);
1599 gen(dst
, tcg_env
, src1
, src2
);
1600 gen_helper_check_ieee_exceptions(cpu_fsr
, tcg_env
);
1602 gen_store_fpr_D(dc
, rd
, dst
);
1605 #ifdef TARGET_SPARC64
1606 static void gen_ne_fop_DDD(DisasContext
*dc
, int rd
, int rs1
, int rs2
,
1607 void (*gen
)(TCGv_i64
, TCGv_i64
, TCGv_i64
))
1609 TCGv_i64 dst
, src1
, src2
;
1611 src1
= gen_load_fpr_D(dc
, rs1
);
1612 src2
= gen_load_fpr_D(dc
, rs2
);
1613 dst
= gen_dest_fpr_D(dc
, rd
);
1615 gen(dst
, src1
, src2
);
1617 gen_store_fpr_D(dc
, rd
, dst
);
1620 static void gen_gsr_fop_DDD(DisasContext
*dc
, int rd
, int rs1
, int rs2
,
1621 void (*gen
)(TCGv_i64
, TCGv_i64
, TCGv_i64
, TCGv_i64
))
1623 TCGv_i64 dst
, src1
, src2
;
1625 src1
= gen_load_fpr_D(dc
, rs1
);
1626 src2
= gen_load_fpr_D(dc
, rs2
);
1627 dst
= gen_dest_fpr_D(dc
, rd
);
1629 gen(dst
, cpu_gsr
, src1
, src2
);
1631 gen_store_fpr_D(dc
, rd
, dst
);
1634 static void gen_ne_fop_DDDD(DisasContext
*dc
, int rd
, int rs1
, int rs2
,
1635 void (*gen
)(TCGv_i64
, TCGv_i64
, TCGv_i64
, TCGv_i64
))
1637 TCGv_i64 dst
, src0
, src1
, src2
;
1639 src1
= gen_load_fpr_D(dc
, rs1
);
1640 src2
= gen_load_fpr_D(dc
, rs2
);
1641 src0
= gen_load_fpr_D(dc
, rd
);
1642 dst
= gen_dest_fpr_D(dc
, rd
);
1644 gen(dst
, src0
, src1
, src2
);
1646 gen_store_fpr_D(dc
, rd
, dst
);
1650 static void gen_fop_QQ(DisasContext
*dc
, int rd
, int rs
,
1651 void (*gen
)(TCGv_ptr
))
1653 gen_op_load_fpr_QT1(QFPREG(rs
));
1656 gen_helper_check_ieee_exceptions(cpu_fsr
, tcg_env
);
1658 gen_op_store_QT0_fpr(QFPREG(rd
));
1659 gen_update_fprs_dirty(dc
, QFPREG(rd
));
1662 #ifdef TARGET_SPARC64
1663 static void gen_ne_fop_QQ(DisasContext
*dc
, int rd
, int rs
,
1664 void (*gen
)(TCGv_ptr
))
1666 gen_op_load_fpr_QT1(QFPREG(rs
));
1670 gen_op_store_QT0_fpr(QFPREG(rd
));
1671 gen_update_fprs_dirty(dc
, QFPREG(rd
));
1675 static void gen_fop_QQQ(DisasContext
*dc
, int rd
, int rs1
, int rs2
,
1676 void (*gen
)(TCGv_ptr
))
1678 gen_op_load_fpr_QT0(QFPREG(rs1
));
1679 gen_op_load_fpr_QT1(QFPREG(rs2
));
1682 gen_helper_check_ieee_exceptions(cpu_fsr
, tcg_env
);
1684 gen_op_store_QT0_fpr(QFPREG(rd
));
1685 gen_update_fprs_dirty(dc
, QFPREG(rd
));
1688 static void gen_fop_DFF(DisasContext
*dc
, int rd
, int rs1
, int rs2
,
1689 void (*gen
)(TCGv_i64
, TCGv_ptr
, TCGv_i32
, TCGv_i32
))
1692 TCGv_i32 src1
, src2
;
1694 src1
= gen_load_fpr_F(dc
, rs1
);
1695 src2
= gen_load_fpr_F(dc
, rs2
);
1696 dst
= gen_dest_fpr_D(dc
, rd
);
1698 gen(dst
, tcg_env
, src1
, src2
);
1699 gen_helper_check_ieee_exceptions(cpu_fsr
, tcg_env
);
1701 gen_store_fpr_D(dc
, rd
, dst
);
1704 static void gen_fop_QDD(DisasContext
*dc
, int rd
, int rs1
, int rs2
,
1705 void (*gen
)(TCGv_ptr
, TCGv_i64
, TCGv_i64
))
1707 TCGv_i64 src1
, src2
;
1709 src1
= gen_load_fpr_D(dc
, rs1
);
1710 src2
= gen_load_fpr_D(dc
, rs2
);
1712 gen(tcg_env
, src1
, src2
);
1713 gen_helper_check_ieee_exceptions(cpu_fsr
, tcg_env
);
1715 gen_op_store_QT0_fpr(QFPREG(rd
));
1716 gen_update_fprs_dirty(dc
, QFPREG(rd
));
1719 #ifdef TARGET_SPARC64
1720 static void gen_fop_DF(DisasContext
*dc
, int rd
, int rs
,
1721 void (*gen
)(TCGv_i64
, TCGv_ptr
, TCGv_i32
))
1726 src
= gen_load_fpr_F(dc
, rs
);
1727 dst
= gen_dest_fpr_D(dc
, rd
);
1729 gen(dst
, tcg_env
, src
);
1730 gen_helper_check_ieee_exceptions(cpu_fsr
, tcg_env
);
1732 gen_store_fpr_D(dc
, rd
, dst
);
1736 static void gen_ne_fop_DF(DisasContext
*dc
, int rd
, int rs
,
1737 void (*gen
)(TCGv_i64
, TCGv_ptr
, TCGv_i32
))
1742 src
= gen_load_fpr_F(dc
, rs
);
1743 dst
= gen_dest_fpr_D(dc
, rd
);
1745 gen(dst
, tcg_env
, src
);
1747 gen_store_fpr_D(dc
, rd
, dst
);
1750 static void gen_fop_FD(DisasContext
*dc
, int rd
, int rs
,
1751 void (*gen
)(TCGv_i32
, TCGv_ptr
, TCGv_i64
))
1756 src
= gen_load_fpr_D(dc
, rs
);
1757 dst
= gen_dest_fpr_F(dc
);
1759 gen(dst
, tcg_env
, src
);
1760 gen_helper_check_ieee_exceptions(cpu_fsr
, tcg_env
);
1762 gen_store_fpr_F(dc
, rd
, dst
);
1765 static void gen_fop_FQ(DisasContext
*dc
, int rd
, int rs
,
1766 void (*gen
)(TCGv_i32
, TCGv_ptr
))
1770 gen_op_load_fpr_QT1(QFPREG(rs
));
1771 dst
= gen_dest_fpr_F(dc
);
1774 gen_helper_check_ieee_exceptions(cpu_fsr
, tcg_env
);
1776 gen_store_fpr_F(dc
, rd
, dst
);
1779 static void gen_fop_DQ(DisasContext
*dc
, int rd
, int rs
,
1780 void (*gen
)(TCGv_i64
, TCGv_ptr
))
1784 gen_op_load_fpr_QT1(QFPREG(rs
));
1785 dst
= gen_dest_fpr_D(dc
, rd
);
1788 gen_helper_check_ieee_exceptions(cpu_fsr
, tcg_env
);
1790 gen_store_fpr_D(dc
, rd
, dst
);
1793 static void gen_ne_fop_QF(DisasContext
*dc
, int rd
, int rs
,
1794 void (*gen
)(TCGv_ptr
, TCGv_i32
))
1798 src
= gen_load_fpr_F(dc
, rs
);
1802 gen_op_store_QT0_fpr(QFPREG(rd
));
1803 gen_update_fprs_dirty(dc
, QFPREG(rd
));
1806 static void gen_ne_fop_QD(DisasContext
*dc
, int rd
, int rs
,
1807 void (*gen
)(TCGv_ptr
, TCGv_i64
))
1811 src
= gen_load_fpr_D(dc
, rs
);
1815 gen_op_store_QT0_fpr(QFPREG(rd
));
1816 gen_update_fprs_dirty(dc
, QFPREG(rd
));
1819 static void gen_swap(DisasContext
*dc
, TCGv dst
, TCGv src
,
1820 TCGv addr
, int mmu_idx
, MemOp memop
)
1822 gen_address_mask(dc
, addr
);
1823 tcg_gen_atomic_xchg_tl(dst
, addr
, src
, mmu_idx
, memop
| MO_ALIGN
);
1826 static void gen_ldstub(DisasContext
*dc
, TCGv dst
, TCGv addr
, int mmu_idx
)
1828 TCGv m1
= tcg_constant_tl(0xff);
1829 gen_address_mask(dc
, addr
);
1830 tcg_gen_atomic_xchg_tl(dst
, addr
, m1
, mmu_idx
, MO_UB
);
1834 #if !defined(CONFIG_USER_ONLY) || defined(TARGET_SPARC64)
1853 static DisasASI
get_asi(DisasContext
*dc
, int insn
, MemOp memop
)
1855 int asi
= GET_FIELD(insn
, 19, 26);
1856 ASIType type
= GET_ASI_HELPER
;
1857 int mem_idx
= dc
->mem_idx
;
1859 #ifndef TARGET_SPARC64
1860 /* Before v9, all asis are immediate and privileged. */
1862 gen_exception(dc
, TT_ILL_INSN
);
1863 type
= GET_ASI_EXCP
;
1864 } else if (supervisor(dc
)
1865 /* Note that LEON accepts ASI_USERDATA in user mode, for
1866 use with CASA. Also note that previous versions of
1867 QEMU allowed (and old versions of gcc emitted) ASI_P
1868 for LEON, which is incorrect. */
1869 || (asi
== ASI_USERDATA
1870 && (dc
->def
->features
& CPU_FEATURE_CASA
))) {
1872 case ASI_USERDATA
: /* User data access */
1873 mem_idx
= MMU_USER_IDX
;
1874 type
= GET_ASI_DIRECT
;
1876 case ASI_KERNELDATA
: /* Supervisor data access */
1877 mem_idx
= MMU_KERNEL_IDX
;
1878 type
= GET_ASI_DIRECT
;
1880 case ASI_M_BYPASS
: /* MMU passthrough */
1881 case ASI_LEON_BYPASS
: /* LEON MMU passthrough */
1882 mem_idx
= MMU_PHYS_IDX
;
1883 type
= GET_ASI_DIRECT
;
1885 case ASI_M_BCOPY
: /* Block copy, sta access */
1886 mem_idx
= MMU_KERNEL_IDX
;
1887 type
= GET_ASI_BCOPY
;
1889 case ASI_M_BFILL
: /* Block fill, stda access */
1890 mem_idx
= MMU_KERNEL_IDX
;
1891 type
= GET_ASI_BFILL
;
1895 /* MMU_PHYS_IDX is used when the MMU is disabled to passthrough the
1896 * permissions check in get_physical_address(..).
1898 mem_idx
= (dc
->mem_idx
== MMU_PHYS_IDX
) ? MMU_PHYS_IDX
: mem_idx
;
1900 gen_exception(dc
, TT_PRIV_INSN
);
1901 type
= GET_ASI_EXCP
;
1907 /* With v9, all asis below 0x80 are privileged. */
1908 /* ??? We ought to check cpu_has_hypervisor, but we didn't copy
1909 down that bit into DisasContext. For the moment that's ok,
1910 since the direct implementations below doesn't have any ASIs
1911 in the restricted [0x30, 0x7f] range, and the check will be
1912 done properly in the helper. */
1913 if (!supervisor(dc
) && asi
< 0x80) {
1914 gen_exception(dc
, TT_PRIV_ACT
);
1915 type
= GET_ASI_EXCP
;
1918 case ASI_REAL
: /* Bypass */
1919 case ASI_REAL_IO
: /* Bypass, non-cacheable */
1920 case ASI_REAL_L
: /* Bypass LE */
1921 case ASI_REAL_IO_L
: /* Bypass, non-cacheable LE */
1922 case ASI_TWINX_REAL
: /* Real address, twinx */
1923 case ASI_TWINX_REAL_L
: /* Real address, twinx, LE */
1924 case ASI_QUAD_LDD_PHYS
:
1925 case ASI_QUAD_LDD_PHYS_L
:
1926 mem_idx
= MMU_PHYS_IDX
;
1928 case ASI_N
: /* Nucleus */
1929 case ASI_NL
: /* Nucleus LE */
1932 case ASI_NUCLEUS_QUAD_LDD
:
1933 case ASI_NUCLEUS_QUAD_LDD_L
:
1934 if (hypervisor(dc
)) {
1935 mem_idx
= MMU_PHYS_IDX
;
1937 mem_idx
= MMU_NUCLEUS_IDX
;
1940 case ASI_AIUP
: /* As if user primary */
1941 case ASI_AIUPL
: /* As if user primary LE */
1942 case ASI_TWINX_AIUP
:
1943 case ASI_TWINX_AIUP_L
:
1944 case ASI_BLK_AIUP_4V
:
1945 case ASI_BLK_AIUP_L_4V
:
1948 mem_idx
= MMU_USER_IDX
;
1950 case ASI_AIUS
: /* As if user secondary */
1951 case ASI_AIUSL
: /* As if user secondary LE */
1952 case ASI_TWINX_AIUS
:
1953 case ASI_TWINX_AIUS_L
:
1954 case ASI_BLK_AIUS_4V
:
1955 case ASI_BLK_AIUS_L_4V
:
1958 mem_idx
= MMU_USER_SECONDARY_IDX
;
1960 case ASI_S
: /* Secondary */
1961 case ASI_SL
: /* Secondary LE */
1964 case ASI_BLK_COMMIT_S
:
1971 if (mem_idx
== MMU_USER_IDX
) {
1972 mem_idx
= MMU_USER_SECONDARY_IDX
;
1973 } else if (mem_idx
== MMU_KERNEL_IDX
) {
1974 mem_idx
= MMU_KERNEL_SECONDARY_IDX
;
1977 case ASI_P
: /* Primary */
1978 case ASI_PL
: /* Primary LE */
1981 case ASI_BLK_COMMIT_P
:
2005 type
= GET_ASI_DIRECT
;
2007 case ASI_TWINX_REAL
:
2008 case ASI_TWINX_REAL_L
:
2011 case ASI_TWINX_AIUP
:
2012 case ASI_TWINX_AIUP_L
:
2013 case ASI_TWINX_AIUS
:
2014 case ASI_TWINX_AIUS_L
:
2019 case ASI_QUAD_LDD_PHYS
:
2020 case ASI_QUAD_LDD_PHYS_L
:
2021 case ASI_NUCLEUS_QUAD_LDD
:
2022 case ASI_NUCLEUS_QUAD_LDD_L
:
2023 type
= GET_ASI_DTWINX
;
2025 case ASI_BLK_COMMIT_P
:
2026 case ASI_BLK_COMMIT_S
:
2027 case ASI_BLK_AIUP_4V
:
2028 case ASI_BLK_AIUP_L_4V
:
2031 case ASI_BLK_AIUS_4V
:
2032 case ASI_BLK_AIUS_L_4V
:
2039 type
= GET_ASI_BLOCK
;
2046 type
= GET_ASI_SHORT
;
2053 type
= GET_ASI_SHORT
;
2056 /* The little-endian asis all have bit 3 set. */
2063 return (DisasASI
){ type
, asi
, mem_idx
, memop
};
2066 static void gen_ld_asi(DisasContext
*dc
, TCGv dst
, TCGv addr
,
2067 int insn
, MemOp memop
)
2069 DisasASI da
= get_asi(dc
, insn
, memop
);
2074 case GET_ASI_DTWINX
: /* Reserved for ldda. */
2075 gen_exception(dc
, TT_ILL_INSN
);
2077 case GET_ASI_DIRECT
:
2078 gen_address_mask(dc
, addr
);
2079 tcg_gen_qemu_ld_tl(dst
, addr
, da
.mem_idx
, da
.memop
| MO_ALIGN
);
2083 TCGv_i32 r_asi
= tcg_constant_i32(da
.asi
);
2084 TCGv_i32 r_mop
= tcg_constant_i32(memop
| MO_ALIGN
);
2087 #ifdef TARGET_SPARC64
2088 gen_helper_ld_asi(dst
, tcg_env
, addr
, r_asi
, r_mop
);
2091 TCGv_i64 t64
= tcg_temp_new_i64();
2092 gen_helper_ld_asi(t64
, tcg_env
, addr
, r_asi
, r_mop
);
2093 tcg_gen_trunc_i64_tl(dst
, t64
);
2101 static void gen_st_asi(DisasContext
*dc
, TCGv src
, TCGv addr
,
2102 int insn
, MemOp memop
)
2104 DisasASI da
= get_asi(dc
, insn
, memop
);
2109 case GET_ASI_DTWINX
: /* Reserved for stda. */
2110 #ifndef TARGET_SPARC64
2111 gen_exception(dc
, TT_ILL_INSN
);
2114 if (!(dc
->def
->features
& CPU_FEATURE_HYPV
)) {
2115 /* Pre OpenSPARC CPUs don't have these */
2116 gen_exception(dc
, TT_ILL_INSN
);
2119 /* in OpenSPARC T1+ CPUs TWINX ASIs in store instructions
2120 * are ST_BLKINIT_ ASIs */
2123 case GET_ASI_DIRECT
:
2124 gen_address_mask(dc
, addr
);
2125 tcg_gen_qemu_st_tl(src
, addr
, da
.mem_idx
, da
.memop
| MO_ALIGN
);
2127 #if !defined(TARGET_SPARC64) && !defined(CONFIG_USER_ONLY)
2129 /* Copy 32 bytes from the address in SRC to ADDR. */
2130 /* ??? The original qemu code suggests 4-byte alignment, dropping
2131 the low bits, but the only place I can see this used is in the
2132 Linux kernel with 32 byte alignment, which would make more sense
2133 as a cacheline-style operation. */
2135 TCGv saddr
= tcg_temp_new();
2136 TCGv daddr
= tcg_temp_new();
2137 TCGv four
= tcg_constant_tl(4);
2138 TCGv_i32 tmp
= tcg_temp_new_i32();
2141 tcg_gen_andi_tl(saddr
, src
, -4);
2142 tcg_gen_andi_tl(daddr
, addr
, -4);
2143 for (i
= 0; i
< 32; i
+= 4) {
2144 /* Since the loads and stores are paired, allow the
2145 copy to happen in the host endianness. */
2146 tcg_gen_qemu_ld_i32(tmp
, saddr
, da
.mem_idx
, MO_UL
);
2147 tcg_gen_qemu_st_i32(tmp
, daddr
, da
.mem_idx
, MO_UL
);
2148 tcg_gen_add_tl(saddr
, saddr
, four
);
2149 tcg_gen_add_tl(daddr
, daddr
, four
);
2156 TCGv_i32 r_asi
= tcg_constant_i32(da
.asi
);
2157 TCGv_i32 r_mop
= tcg_constant_i32(memop
| MO_ALIGN
);
2160 #ifdef TARGET_SPARC64
2161 gen_helper_st_asi(tcg_env
, addr
, src
, r_asi
, r_mop
);
2164 TCGv_i64 t64
= tcg_temp_new_i64();
2165 tcg_gen_extu_tl_i64(t64
, src
);
2166 gen_helper_st_asi(tcg_env
, addr
, t64
, r_asi
, r_mop
);
2170 /* A write to a TLB register may alter page maps. End the TB. */
2171 dc
->npc
= DYNAMIC_PC
;
2177 static void gen_swap_asi(DisasContext
*dc
, TCGv dst
, TCGv src
,
2178 TCGv addr
, int insn
)
2180 DisasASI da
= get_asi(dc
, insn
, MO_TEUL
);
2185 case GET_ASI_DIRECT
:
2186 gen_swap(dc
, dst
, src
, addr
, da
.mem_idx
, da
.memop
);
2189 /* ??? Should be DAE_invalid_asi. */
2190 gen_exception(dc
, TT_DATA_ACCESS
);
2195 static void gen_cas_asi(DisasContext
*dc
, TCGv addr
, TCGv cmpv
,
2198 DisasASI da
= get_asi(dc
, insn
, MO_TEUL
);
2204 case GET_ASI_DIRECT
:
2205 oldv
= tcg_temp_new();
2206 tcg_gen_atomic_cmpxchg_tl(oldv
, addr
, cmpv
, gen_load_gpr(dc
, rd
),
2207 da
.mem_idx
, da
.memop
| MO_ALIGN
);
2208 gen_store_gpr(dc
, rd
, oldv
);
2211 /* ??? Should be DAE_invalid_asi. */
2212 gen_exception(dc
, TT_DATA_ACCESS
);
2217 static void gen_ldstub_asi(DisasContext
*dc
, TCGv dst
, TCGv addr
, int insn
)
2219 DisasASI da
= get_asi(dc
, insn
, MO_UB
);
2224 case GET_ASI_DIRECT
:
2225 gen_ldstub(dc
, dst
, addr
, da
.mem_idx
);
2228 /* ??? In theory, this should be raise DAE_invalid_asi.
2229 But the SS-20 roms do ldstuba [%l0] #ASI_M_CTL, %o1. */
2230 if (tb_cflags(dc
->base
.tb
) & CF_PARALLEL
) {
2231 gen_helper_exit_atomic(tcg_env
);
2233 TCGv_i32 r_asi
= tcg_constant_i32(da
.asi
);
2234 TCGv_i32 r_mop
= tcg_constant_i32(MO_UB
);
2238 t64
= tcg_temp_new_i64();
2239 gen_helper_ld_asi(t64
, tcg_env
, addr
, r_asi
, r_mop
);
2241 s64
= tcg_constant_i64(0xff);
2242 gen_helper_st_asi(tcg_env
, addr
, s64
, r_asi
, r_mop
);
2244 tcg_gen_trunc_i64_tl(dst
, t64
);
2247 dc
->npc
= DYNAMIC_PC
;
2254 #ifdef TARGET_SPARC64
2255 static void gen_ldf_asi(DisasContext
*dc
, TCGv addr
,
2256 int insn
, int size
, int rd
)
2258 DisasASI da
= get_asi(dc
, insn
, (size
== 4 ? MO_TEUL
: MO_TEUQ
));
2266 case GET_ASI_DIRECT
:
2267 gen_address_mask(dc
, addr
);
2270 d32
= gen_dest_fpr_F(dc
);
2271 tcg_gen_qemu_ld_i32(d32
, addr
, da
.mem_idx
, da
.memop
| MO_ALIGN
);
2272 gen_store_fpr_F(dc
, rd
, d32
);
2275 tcg_gen_qemu_ld_i64(cpu_fpr
[rd
/ 2], addr
, da
.mem_idx
,
2276 da
.memop
| MO_ALIGN_4
);
2279 d64
= tcg_temp_new_i64();
2280 tcg_gen_qemu_ld_i64(d64
, addr
, da
.mem_idx
, da
.memop
| MO_ALIGN_4
);
2281 tcg_gen_addi_tl(addr
, addr
, 8);
2282 tcg_gen_qemu_ld_i64(cpu_fpr
[rd
/2+1], addr
, da
.mem_idx
,
2283 da
.memop
| MO_ALIGN_4
);
2284 tcg_gen_mov_i64(cpu_fpr
[rd
/ 2], d64
);
2287 g_assert_not_reached();
2292 /* Valid for lddfa on aligned registers only. */
2293 if (size
== 8 && (rd
& 7) == 0) {
2298 gen_address_mask(dc
, addr
);
2300 /* The first operation checks required alignment. */
2301 memop
= da
.memop
| MO_ALIGN_64
;
2302 eight
= tcg_constant_tl(8);
2303 for (i
= 0; ; ++i
) {
2304 tcg_gen_qemu_ld_i64(cpu_fpr
[rd
/ 2 + i
], addr
,
2309 tcg_gen_add_tl(addr
, addr
, eight
);
2313 gen_exception(dc
, TT_ILL_INSN
);
2318 /* Valid for lddfa only. */
2320 gen_address_mask(dc
, addr
);
2321 tcg_gen_qemu_ld_i64(cpu_fpr
[rd
/ 2], addr
, da
.mem_idx
,
2322 da
.memop
| MO_ALIGN
);
2324 gen_exception(dc
, TT_ILL_INSN
);
2330 TCGv_i32 r_asi
= tcg_constant_i32(da
.asi
);
2331 TCGv_i32 r_mop
= tcg_constant_i32(da
.memop
| MO_ALIGN
);
2334 /* According to the table in the UA2011 manual, the only
2335 other asis that are valid for ldfa/lddfa/ldqfa are
2336 the NO_FAULT asis. We still need a helper for these,
2337 but we can just use the integer asi helper for them. */
2340 d64
= tcg_temp_new_i64();
2341 gen_helper_ld_asi(d64
, tcg_env
, addr
, r_asi
, r_mop
);
2342 d32
= gen_dest_fpr_F(dc
);
2343 tcg_gen_extrl_i64_i32(d32
, d64
);
2344 gen_store_fpr_F(dc
, rd
, d32
);
2347 gen_helper_ld_asi(cpu_fpr
[rd
/ 2], tcg_env
, addr
, r_asi
, r_mop
);
2350 d64
= tcg_temp_new_i64();
2351 gen_helper_ld_asi(d64
, tcg_env
, addr
, r_asi
, r_mop
);
2352 tcg_gen_addi_tl(addr
, addr
, 8);
2353 gen_helper_ld_asi(cpu_fpr
[rd
/2+1], tcg_env
, addr
, r_asi
, r_mop
);
2354 tcg_gen_mov_i64(cpu_fpr
[rd
/ 2], d64
);
2357 g_assert_not_reached();
2364 static void gen_stf_asi(DisasContext
*dc
, TCGv addr
,
2365 int insn
, int size
, int rd
)
2367 DisasASI da
= get_asi(dc
, insn
, (size
== 4 ? MO_TEUL
: MO_TEUQ
));
2374 case GET_ASI_DIRECT
:
2375 gen_address_mask(dc
, addr
);
2378 d32
= gen_load_fpr_F(dc
, rd
);
2379 tcg_gen_qemu_st_i32(d32
, addr
, da
.mem_idx
, da
.memop
| MO_ALIGN
);
2382 tcg_gen_qemu_st_i64(cpu_fpr
[rd
/ 2], addr
, da
.mem_idx
,
2383 da
.memop
| MO_ALIGN_4
);
2386 /* Only 4-byte alignment required. However, it is legal for the
2387 cpu to signal the alignment fault, and the OS trap handler is
2388 required to fix it up. Requiring 16-byte alignment here avoids
2389 having to probe the second page before performing the first
2391 tcg_gen_qemu_st_i64(cpu_fpr
[rd
/ 2], addr
, da
.mem_idx
,
2392 da
.memop
| MO_ALIGN_16
);
2393 tcg_gen_addi_tl(addr
, addr
, 8);
2394 tcg_gen_qemu_st_i64(cpu_fpr
[rd
/2+1], addr
, da
.mem_idx
, da
.memop
);
2397 g_assert_not_reached();
2402 /* Valid for stdfa on aligned registers only. */
2403 if (size
== 8 && (rd
& 7) == 0) {
2408 gen_address_mask(dc
, addr
);
2410 /* The first operation checks required alignment. */
2411 memop
= da
.memop
| MO_ALIGN_64
;
2412 eight
= tcg_constant_tl(8);
2413 for (i
= 0; ; ++i
) {
2414 tcg_gen_qemu_st_i64(cpu_fpr
[rd
/ 2 + i
], addr
,
2419 tcg_gen_add_tl(addr
, addr
, eight
);
2423 gen_exception(dc
, TT_ILL_INSN
);
2428 /* Valid for stdfa only. */
2430 gen_address_mask(dc
, addr
);
2431 tcg_gen_qemu_st_i64(cpu_fpr
[rd
/ 2], addr
, da
.mem_idx
,
2432 da
.memop
| MO_ALIGN
);
2434 gen_exception(dc
, TT_ILL_INSN
);
2439 /* According to the table in the UA2011 manual, the only
2440 other asis that are valid for ldfa/lddfa/ldqfa are
2441 the PST* asis, which aren't currently handled. */
2442 gen_exception(dc
, TT_ILL_INSN
);
2447 static void gen_ldda_asi(DisasContext
*dc
, TCGv addr
, int insn
, int rd
)
2449 DisasASI da
= get_asi(dc
, insn
, MO_TEUQ
);
2450 TCGv_i64 hi
= gen_dest_gpr(dc
, rd
);
2451 TCGv_i64 lo
= gen_dest_gpr(dc
, rd
+ 1);
2457 case GET_ASI_DTWINX
:
2458 gen_address_mask(dc
, addr
);
2459 tcg_gen_qemu_ld_i64(hi
, addr
, da
.mem_idx
, da
.memop
| MO_ALIGN_16
);
2460 tcg_gen_addi_tl(addr
, addr
, 8);
2461 tcg_gen_qemu_ld_i64(lo
, addr
, da
.mem_idx
, da
.memop
);
2464 case GET_ASI_DIRECT
:
2466 TCGv_i64 tmp
= tcg_temp_new_i64();
2468 gen_address_mask(dc
, addr
);
2469 tcg_gen_qemu_ld_i64(tmp
, addr
, da
.mem_idx
, da
.memop
| MO_ALIGN
);
2471 /* Note that LE ldda acts as if each 32-bit register
2472 result is byte swapped. Having just performed one
2473 64-bit bswap, we need now to swap the writebacks. */
2474 if ((da
.memop
& MO_BSWAP
) == MO_TE
) {
2475 tcg_gen_extr32_i64(lo
, hi
, tmp
);
2477 tcg_gen_extr32_i64(hi
, lo
, tmp
);
2483 /* ??? In theory we've handled all of the ASIs that are valid
2484 for ldda, and this should raise DAE_invalid_asi. However,
2485 real hardware allows others. This can be seen with e.g.
2486 FreeBSD 10.3 wrt ASI_IC_TAG. */
2488 TCGv_i32 r_asi
= tcg_constant_i32(da
.asi
);
2489 TCGv_i32 r_mop
= tcg_constant_i32(da
.memop
);
2490 TCGv_i64 tmp
= tcg_temp_new_i64();
2493 gen_helper_ld_asi(tmp
, tcg_env
, addr
, r_asi
, r_mop
);
2496 if ((da
.memop
& MO_BSWAP
) == MO_TE
) {
2497 tcg_gen_extr32_i64(lo
, hi
, tmp
);
2499 tcg_gen_extr32_i64(hi
, lo
, tmp
);
2505 gen_store_gpr(dc
, rd
, hi
);
2506 gen_store_gpr(dc
, rd
+ 1, lo
);
2509 static void gen_stda_asi(DisasContext
*dc
, TCGv hi
, TCGv addr
,
2512 DisasASI da
= get_asi(dc
, insn
, MO_TEUQ
);
2513 TCGv lo
= gen_load_gpr(dc
, rd
+ 1);
2519 case GET_ASI_DTWINX
:
2520 gen_address_mask(dc
, addr
);
2521 tcg_gen_qemu_st_i64(hi
, addr
, da
.mem_idx
, da
.memop
| MO_ALIGN_16
);
2522 tcg_gen_addi_tl(addr
, addr
, 8);
2523 tcg_gen_qemu_st_i64(lo
, addr
, da
.mem_idx
, da
.memop
);
2526 case GET_ASI_DIRECT
:
2528 TCGv_i64 t64
= tcg_temp_new_i64();
2530 /* Note that LE stda acts as if each 32-bit register result is
2531 byte swapped. We will perform one 64-bit LE store, so now
2532 we must swap the order of the construction. */
2533 if ((da
.memop
& MO_BSWAP
) == MO_TE
) {
2534 tcg_gen_concat32_i64(t64
, lo
, hi
);
2536 tcg_gen_concat32_i64(t64
, hi
, lo
);
2538 gen_address_mask(dc
, addr
);
2539 tcg_gen_qemu_st_i64(t64
, addr
, da
.mem_idx
, da
.memop
| MO_ALIGN
);
2544 /* ??? In theory we've handled all of the ASIs that are valid
2545 for stda, and this should raise DAE_invalid_asi. */
2547 TCGv_i32 r_asi
= tcg_constant_i32(da
.asi
);
2548 TCGv_i32 r_mop
= tcg_constant_i32(da
.memop
);
2549 TCGv_i64 t64
= tcg_temp_new_i64();
2552 if ((da
.memop
& MO_BSWAP
) == MO_TE
) {
2553 tcg_gen_concat32_i64(t64
, lo
, hi
);
2555 tcg_gen_concat32_i64(t64
, hi
, lo
);
2559 gen_helper_st_asi(tcg_env
, addr
, t64
, r_asi
, r_mop
);
2565 static void gen_casx_asi(DisasContext
*dc
, TCGv addr
, TCGv cmpv
,
2568 DisasASI da
= get_asi(dc
, insn
, MO_TEUQ
);
2574 case GET_ASI_DIRECT
:
2575 oldv
= tcg_temp_new();
2576 tcg_gen_atomic_cmpxchg_tl(oldv
, addr
, cmpv
, gen_load_gpr(dc
, rd
),
2577 da
.mem_idx
, da
.memop
| MO_ALIGN
);
2578 gen_store_gpr(dc
, rd
, oldv
);
2581 /* ??? Should be DAE_invalid_asi. */
2582 gen_exception(dc
, TT_DATA_ACCESS
);
2587 #elif !defined(CONFIG_USER_ONLY)
2588 static void gen_ldda_asi(DisasContext
*dc
, TCGv addr
, int insn
, int rd
)
2590 /* ??? Work around an apparent bug in Ubuntu gcc 4.8.2-10ubuntu2+12,
2591 whereby "rd + 1" elicits "error: array subscript is above array".
2592 Since we have already asserted that rd is even, the semantics
2594 TCGv lo
= gen_dest_gpr(dc
, rd
| 1);
2595 TCGv hi
= gen_dest_gpr(dc
, rd
);
2596 TCGv_i64 t64
= tcg_temp_new_i64();
2597 DisasASI da
= get_asi(dc
, insn
, MO_TEUQ
);
2602 case GET_ASI_DIRECT
:
2603 gen_address_mask(dc
, addr
);
2604 tcg_gen_qemu_ld_i64(t64
, addr
, da
.mem_idx
, da
.memop
| MO_ALIGN
);
2608 TCGv_i32 r_asi
= tcg_constant_i32(da
.asi
);
2609 TCGv_i32 r_mop
= tcg_constant_i32(MO_UQ
);
2612 gen_helper_ld_asi(t64
, tcg_env
, addr
, r_asi
, r_mop
);
2617 tcg_gen_extr_i64_i32(lo
, hi
, t64
);
2618 gen_store_gpr(dc
, rd
| 1, lo
);
2619 gen_store_gpr(dc
, rd
, hi
);
2622 static void gen_stda_asi(DisasContext
*dc
, TCGv hi
, TCGv addr
,
2625 DisasASI da
= get_asi(dc
, insn
, MO_TEUQ
);
2626 TCGv lo
= gen_load_gpr(dc
, rd
+ 1);
2627 TCGv_i64 t64
= tcg_temp_new_i64();
2629 tcg_gen_concat_tl_i64(t64
, lo
, hi
);
2634 case GET_ASI_DIRECT
:
2635 gen_address_mask(dc
, addr
);
2636 tcg_gen_qemu_st_i64(t64
, addr
, da
.mem_idx
, da
.memop
| MO_ALIGN
);
2639 /* Store 32 bytes of T64 to ADDR. */
2640 /* ??? The original qemu code suggests 8-byte alignment, dropping
2641 the low bits, but the only place I can see this used is in the
2642 Linux kernel with 32 byte alignment, which would make more sense
2643 as a cacheline-style operation. */
2645 TCGv d_addr
= tcg_temp_new();
2646 TCGv eight
= tcg_constant_tl(8);
2649 tcg_gen_andi_tl(d_addr
, addr
, -8);
2650 for (i
= 0; i
< 32; i
+= 8) {
2651 tcg_gen_qemu_st_i64(t64
, d_addr
, da
.mem_idx
, da
.memop
);
2652 tcg_gen_add_tl(d_addr
, d_addr
, eight
);
2658 TCGv_i32 r_asi
= tcg_constant_i32(da
.asi
);
2659 TCGv_i32 r_mop
= tcg_constant_i32(MO_UQ
);
2662 gen_helper_st_asi(tcg_env
, addr
, t64
, r_asi
, r_mop
);
2669 static TCGv
get_src1(DisasContext
*dc
, unsigned int insn
)
2671 unsigned int rs1
= GET_FIELD(insn
, 13, 17);
2672 return gen_load_gpr(dc
, rs1
);
2675 static TCGv
get_src2(DisasContext
*dc
, unsigned int insn
)
2677 if (IS_IMM
) { /* immediate */
2678 target_long simm
= GET_FIELDs(insn
, 19, 31);
2679 TCGv t
= tcg_temp_new();
2680 tcg_gen_movi_tl(t
, simm
);
2682 } else { /* register */
2683 unsigned int rs2
= GET_FIELD(insn
, 27, 31);
2684 return gen_load_gpr(dc
, rs2
);
2688 #ifdef TARGET_SPARC64
2689 static void gen_fmovs(DisasContext
*dc
, DisasCompare
*cmp
, int rd
, int rs
)
2691 TCGv_i32 c32
, zero
, dst
, s1
, s2
;
2693 /* We have two choices here: extend the 32 bit data and use movcond_i64,
2694 or fold the comparison down to 32 bits and use movcond_i32. Choose
2696 c32
= tcg_temp_new_i32();
2698 tcg_gen_extrl_i64_i32(c32
, cmp
->c1
);
2700 TCGv_i64 c64
= tcg_temp_new_i64();
2701 tcg_gen_setcond_i64(cmp
->cond
, c64
, cmp
->c1
, cmp
->c2
);
2702 tcg_gen_extrl_i64_i32(c32
, c64
);
2705 s1
= gen_load_fpr_F(dc
, rs
);
2706 s2
= gen_load_fpr_F(dc
, rd
);
2707 dst
= gen_dest_fpr_F(dc
);
2708 zero
= tcg_constant_i32(0);
2710 tcg_gen_movcond_i32(TCG_COND_NE
, dst
, c32
, zero
, s1
, s2
);
2712 gen_store_fpr_F(dc
, rd
, dst
);
2715 static void gen_fmovd(DisasContext
*dc
, DisasCompare
*cmp
, int rd
, int rs
)
2717 TCGv_i64 dst
= gen_dest_fpr_D(dc
, rd
);
2718 tcg_gen_movcond_i64(cmp
->cond
, dst
, cmp
->c1
, cmp
->c2
,
2719 gen_load_fpr_D(dc
, rs
),
2720 gen_load_fpr_D(dc
, rd
));
2721 gen_store_fpr_D(dc
, rd
, dst
);
2724 static void gen_fmovq(DisasContext
*dc
, DisasCompare
*cmp
, int rd
, int rs
)
2726 int qd
= QFPREG(rd
);
2727 int qs
= QFPREG(rs
);
2729 tcg_gen_movcond_i64(cmp
->cond
, cpu_fpr
[qd
/ 2], cmp
->c1
, cmp
->c2
,
2730 cpu_fpr
[qs
/ 2], cpu_fpr
[qd
/ 2]);
2731 tcg_gen_movcond_i64(cmp
->cond
, cpu_fpr
[qd
/ 2 + 1], cmp
->c1
, cmp
->c2
,
2732 cpu_fpr
[qs
/ 2 + 1], cpu_fpr
[qd
/ 2 + 1]);
2734 gen_update_fprs_dirty(dc
, qd
);
2737 static void gen_load_trap_state_at_tl(TCGv_ptr r_tsptr
)
2739 TCGv_i32 r_tl
= tcg_temp_new_i32();
2741 /* load env->tl into r_tl */
2742 tcg_gen_ld_i32(r_tl
, tcg_env
, offsetof(CPUSPARCState
, tl
));
2744 /* tl = [0 ... MAXTL_MASK] where MAXTL_MASK must be power of 2 */
2745 tcg_gen_andi_i32(r_tl
, r_tl
, MAXTL_MASK
);
2747 /* calculate offset to current trap state from env->ts, reuse r_tl */
2748 tcg_gen_muli_i32(r_tl
, r_tl
, sizeof (trap_state
));
2749 tcg_gen_addi_ptr(r_tsptr
, tcg_env
, offsetof(CPUSPARCState
, ts
));
2751 /* tsptr = env->ts[env->tl & MAXTL_MASK] */
2753 TCGv_ptr r_tl_tmp
= tcg_temp_new_ptr();
2754 tcg_gen_ext_i32_ptr(r_tl_tmp
, r_tl
);
2755 tcg_gen_add_ptr(r_tsptr
, r_tsptr
, r_tl_tmp
);
2759 static void gen_edge(DisasContext
*dc
, TCGv dst
, TCGv s1
, TCGv s2
,
2760 int width
, bool cc
, bool left
)
2763 uint64_t amask
, tabl
, tabr
;
2764 int shift
, imask
, omask
;
2767 tcg_gen_mov_tl(cpu_cc_src
, s1
);
2768 tcg_gen_mov_tl(cpu_cc_src2
, s2
);
2769 tcg_gen_sub_tl(cpu_cc_dst
, s1
, s2
);
2770 tcg_gen_movi_i32(cpu_cc_op
, CC_OP_SUB
);
2771 dc
->cc_op
= CC_OP_SUB
;
2774 /* Theory of operation: there are two tables, left and right (not to
2775 be confused with the left and right versions of the opcode). These
2776 are indexed by the low 3 bits of the inputs. To make things "easy",
2777 these tables are loaded into two constants, TABL and TABR below.
2778 The operation index = (input & imask) << shift calculates the index
2779 into the constant, while val = (table >> index) & omask calculates
2780 the value we're looking for. */
2787 tabl
= 0x80c0e0f0f8fcfeffULL
;
2788 tabr
= 0xff7f3f1f0f070301ULL
;
2790 tabl
= 0x0103070f1f3f7fffULL
;
2791 tabr
= 0xfffefcf8f0e0c080ULL
;
2811 tabl
= (2 << 2) | 3;
2812 tabr
= (3 << 2) | 1;
2814 tabl
= (1 << 2) | 3;
2815 tabr
= (3 << 2) | 2;
2822 lo1
= tcg_temp_new();
2823 lo2
= tcg_temp_new();
2824 tcg_gen_andi_tl(lo1
, s1
, imask
);
2825 tcg_gen_andi_tl(lo2
, s2
, imask
);
2826 tcg_gen_shli_tl(lo1
, lo1
, shift
);
2827 tcg_gen_shli_tl(lo2
, lo2
, shift
);
2829 tcg_gen_shr_tl(lo1
, tcg_constant_tl(tabl
), lo1
);
2830 tcg_gen_shr_tl(lo2
, tcg_constant_tl(tabr
), lo2
);
2831 tcg_gen_andi_tl(lo1
, lo1
, omask
);
2832 tcg_gen_andi_tl(lo2
, lo2
, omask
);
2836 amask
&= 0xffffffffULL
;
2838 tcg_gen_andi_tl(s1
, s1
, amask
);
2839 tcg_gen_andi_tl(s2
, s2
, amask
);
2841 /* Compute dst = (s1 == s2 ? lo1 : lo1 & lo2). */
2842 tcg_gen_and_tl(lo2
, lo2
, lo1
);
2843 tcg_gen_movcond_tl(TCG_COND_EQ
, dst
, s1
, s2
, lo1
, lo2
);
2846 static void gen_alignaddr(TCGv dst
, TCGv s1
, TCGv s2
, bool left
)
2848 TCGv tmp
= tcg_temp_new();
2850 tcg_gen_add_tl(tmp
, s1
, s2
);
2851 tcg_gen_andi_tl(dst
, tmp
, -8);
2853 tcg_gen_neg_tl(tmp
, tmp
);
2855 tcg_gen_deposit_tl(cpu_gsr
, cpu_gsr
, tmp
, 0, 3);
2858 static void gen_faligndata(TCGv dst
, TCGv gsr
, TCGv s1
, TCGv s2
)
2862 t1
= tcg_temp_new();
2863 t2
= tcg_temp_new();
2864 shift
= tcg_temp_new();
2866 tcg_gen_andi_tl(shift
, gsr
, 7);
2867 tcg_gen_shli_tl(shift
, shift
, 3);
2868 tcg_gen_shl_tl(t1
, s1
, shift
);
2870 /* A shift of 64 does not produce 0 in TCG. Divide this into a
2871 shift of (up to 63) followed by a constant shift of 1. */
2872 tcg_gen_xori_tl(shift
, shift
, 63);
2873 tcg_gen_shr_tl(t2
, s2
, shift
);
2874 tcg_gen_shri_tl(t2
, t2
, 1);
2876 tcg_gen_or_tl(dst
, t1
, t2
);
2880 /* Include the auto-generated decoder. */
2881 #include "decode-insns.c.inc"
2883 #define TRANS(NAME, AVAIL, FUNC, ...) \
2884 static bool trans_##NAME(DisasContext *dc, arg_##NAME *a) \
2885 { return avail_##AVAIL(dc) && FUNC(dc, __VA_ARGS__); }
2887 #define avail_ALL(C) true
2888 #ifdef TARGET_SPARC64
2889 # define avail_32(C) false
2890 # define avail_ASR17(C) false
2891 # define avail_POWERDOWN(C) false
2892 # define avail_64(C) true
2893 # define avail_GL(C) ((C)->def->features & CPU_FEATURE_GL)
2894 # define avail_HYPV(C) ((C)->def->features & CPU_FEATURE_HYPV)
2896 # define avail_32(C) true
2897 # define avail_ASR17(C) ((C)->def->features & CPU_FEATURE_ASR17)
2898 # define avail_POWERDOWN(C) ((C)->def->features & CPU_FEATURE_POWERDOWN)
2899 # define avail_64(C) false
2900 # define avail_GL(C) false
2901 # define avail_HYPV(C) false
2904 /* Default case for non jump instructions. */
2905 static bool advance_pc(DisasContext
*dc
)
2910 case DYNAMIC_PC_LOOKUP
:
2915 /* we can do a static jump */
2916 gen_branch2(dc
, dc
->jump_pc
[0], dc
->jump_pc
[1], cpu_cond
);
2917 dc
->base
.is_jmp
= DISAS_NORETURN
;
2920 g_assert_not_reached();
2924 dc
->npc
= dc
->npc
+ 4;
2930 * Major opcodes 00 and 01 -- branches, call, and sethi
2933 static bool advance_jump_uncond_never(DisasContext
*dc
, bool annul
)
2936 dc
->pc
= dc
->npc
+ 4;
2937 dc
->npc
= dc
->pc
+ 4;
2940 dc
->npc
= dc
->pc
+ 4;
2945 static bool advance_jump_uncond_always(DisasContext
*dc
, bool annul
,
2954 tcg_gen_mov_tl(cpu_pc
, cpu_npc
);
2959 static bool advance_jump_cond(DisasContext
*dc
, DisasCompare
*cmp
,
2960 bool annul
, target_ulong dest
)
2962 target_ulong npc
= dc
->npc
;
2965 TCGLabel
*l1
= gen_new_label();
2967 tcg_gen_brcond_tl(tcg_invert_cond(cmp
->cond
), cmp
->c1
, cmp
->c2
, l1
);
2968 gen_goto_tb(dc
, 0, npc
, dest
);
2970 gen_goto_tb(dc
, 1, npc
+ 4, npc
+ 8);
2972 dc
->base
.is_jmp
= DISAS_NORETURN
;
2977 case DYNAMIC_PC_LOOKUP
:
2978 tcg_gen_mov_tl(cpu_pc
, cpu_npc
);
2979 tcg_gen_addi_tl(cpu_npc
, cpu_npc
, 4);
2980 tcg_gen_movcond_tl(cmp
->cond
, cpu_npc
,
2982 tcg_constant_tl(dest
), cpu_npc
);
2986 g_assert_not_reached();
2990 dc
->jump_pc
[0] = dest
;
2991 dc
->jump_pc
[1] = npc
+ 4;
2994 tcg_gen_mov_tl(cpu_cond
, cmp
->c1
);
2996 tcg_gen_setcond_tl(cmp
->cond
, cpu_cond
, cmp
->c1
, cmp
->c2
);
3003 static bool raise_priv(DisasContext
*dc
)
3005 gen_exception(dc
, TT_PRIV_INSN
);
3009 static bool do_bpcc(DisasContext
*dc
, arg_bcc
*a
)
3011 target_long target
= address_mask_i(dc
, dc
->pc
+ a
->i
* 4);
3016 return advance_jump_uncond_never(dc
, a
->a
);
3018 return advance_jump_uncond_always(dc
, a
->a
, target
);
3022 gen_compare(&cmp
, a
->cc
, a
->cond
, dc
);
3023 return advance_jump_cond(dc
, &cmp
, a
->a
, target
);
3027 TRANS(Bicc
, ALL
, do_bpcc
, a
)
3028 TRANS(BPcc
, 64, do_bpcc
, a
)
3030 static bool do_fbpfcc(DisasContext
*dc
, arg_bcc
*a
)
3032 target_long target
= address_mask_i(dc
, dc
->pc
+ a
->i
* 4);
3035 if (gen_trap_ifnofpu(dc
)) {
3040 return advance_jump_uncond_never(dc
, a
->a
);
3042 return advance_jump_uncond_always(dc
, a
->a
, target
);
3046 gen_fcompare(&cmp
, a
->cc
, a
->cond
);
3047 return advance_jump_cond(dc
, &cmp
, a
->a
, target
);
3051 TRANS(FBPfcc
, 64, do_fbpfcc
, a
)
3052 TRANS(FBfcc
, ALL
, do_fbpfcc
, a
)
3054 static bool trans_BPr(DisasContext
*dc
, arg_BPr
*a
)
3056 target_long target
= address_mask_i(dc
, dc
->pc
+ a
->i
* 4);
3059 if (!avail_64(dc
)) {
3062 if (gen_tcg_cond_reg
[a
->cond
] == TCG_COND_NEVER
) {
3067 gen_compare_reg(&cmp
, a
->cond
, gen_load_gpr(dc
, a
->rs1
));
3068 return advance_jump_cond(dc
, &cmp
, a
->a
, target
);
3071 static bool trans_CALL(DisasContext
*dc
, arg_CALL
*a
)
3073 target_long target
= address_mask_i(dc
, dc
->pc
+ a
->i
* 4);
3075 gen_store_gpr(dc
, 15, tcg_constant_tl(dc
->pc
));
3081 static bool trans_NCP(DisasContext
*dc
, arg_NCP
*a
)
3084 * For sparc32, always generate the no-coprocessor exception.
3085 * For sparc64, always generate illegal instruction.
3087 #ifdef TARGET_SPARC64
3090 gen_exception(dc
, TT_NCP_INSN
);
3095 static bool trans_SETHI(DisasContext
*dc
, arg_SETHI
*a
)
3097 /* Special-case %g0 because that's the canonical nop. */
3099 gen_store_gpr(dc
, a
->rd
, tcg_constant_tl((uint32_t)a
->i
<< 10));
3101 return advance_pc(dc
);
3105 * Major Opcode 10 -- integer, floating-point, vis, and system insns.
3108 static bool do_tcc(DisasContext
*dc
, int cond
, int cc
,
3109 int rs1
, bool imm
, int rs2_or_imm
)
3111 int mask
= ((dc
->def
->features
& CPU_FEATURE_HYPV
) && supervisor(dc
)
3112 ? UA2005_HTRAP_MASK
: V8_TRAP_MASK
);
3119 return advance_pc(dc
);
3123 * Immediate traps are the most common case. Since this value is
3124 * live across the branch, it really pays to evaluate the constant.
3126 if (rs1
== 0 && (imm
|| rs2_or_imm
== 0)) {
3127 trap
= tcg_constant_i32((rs2_or_imm
& mask
) + TT_TRAP
);
3129 trap
= tcg_temp_new_i32();
3130 tcg_gen_trunc_tl_i32(trap
, gen_load_gpr(dc
, rs1
));
3132 tcg_gen_addi_i32(trap
, trap
, rs2_or_imm
);
3134 TCGv_i32 t2
= tcg_temp_new_i32();
3135 tcg_gen_trunc_tl_i32(t2
, gen_load_gpr(dc
, rs2_or_imm
));
3136 tcg_gen_add_i32(trap
, trap
, t2
);
3138 tcg_gen_andi_i32(trap
, trap
, mask
);
3139 tcg_gen_addi_i32(trap
, trap
, TT_TRAP
);
3145 gen_helper_raise_exception(tcg_env
, trap
);
3146 dc
->base
.is_jmp
= DISAS_NORETURN
;
3150 /* Conditional trap. */
3152 lab
= delay_exceptionv(dc
, trap
);
3153 gen_compare(&cmp
, cc
, cond
, dc
);
3154 tcg_gen_brcond_tl(cmp
.cond
, cmp
.c1
, cmp
.c2
, lab
);
3156 return advance_pc(dc
);
3159 static bool trans_Tcc_r(DisasContext
*dc
, arg_Tcc_r
*a
)
3161 if (avail_32(dc
) && a
->cc
) {
3164 return do_tcc(dc
, a
->cond
, a
->cc
, a
->rs1
, false, a
->rs2
);
3167 static bool trans_Tcc_i_v7(DisasContext
*dc
, arg_Tcc_i_v7
*a
)
3172 return do_tcc(dc
, a
->cond
, 0, a
->rs1
, true, a
->i
);
3175 static bool trans_Tcc_i_v9(DisasContext
*dc
, arg_Tcc_i_v9
*a
)
3180 return do_tcc(dc
, a
->cond
, a
->cc
, a
->rs1
, true, a
->i
);
3183 static bool trans_STBAR(DisasContext
*dc
, arg_STBAR
*a
)
3185 tcg_gen_mb(TCG_MO_ST_ST
| TCG_BAR_SC
);
3186 return advance_pc(dc
);
3189 static bool trans_MEMBAR(DisasContext
*dc
, arg_MEMBAR
*a
)
3195 /* Note TCG_MO_* was modeled on sparc64, so mmask matches. */
3196 tcg_gen_mb(a
->mmask
| TCG_BAR_SC
);
3199 /* For #Sync, etc, end the TB to recognize interrupts. */
3200 dc
->base
.is_jmp
= DISAS_EXIT
;
3202 return advance_pc(dc
);
3205 static bool do_rd_special(DisasContext
*dc
, bool priv
, int rd
,
3206 TCGv (*func
)(DisasContext
*, TCGv
))
3209 return raise_priv(dc
);
3211 gen_store_gpr(dc
, rd
, func(dc
, gen_dest_gpr(dc
, rd
)));
3212 return advance_pc(dc
);
3215 static TCGv
do_rdy(DisasContext
*dc
, TCGv dst
)
3220 static bool trans_RDY(DisasContext
*dc
, arg_RDY
*a
)
3223 * TODO: Need a feature bit for sparcv8. In the meantime, treat all
3224 * 32-bit cpus like sparcv7, which ignores the rs1 field.
3225 * This matches after all other ASR, so Leon3 Asr17 is handled first.
3227 if (avail_64(dc
) && a
->rs1
!= 0) {
3230 return do_rd_special(dc
, true, a
->rd
, do_rdy
);
3233 static TCGv
do_rd_leon3_config(DisasContext
*dc
, TCGv dst
)
3238 * TODO: There are many more fields to be filled,
3239 * some of which are writable.
3241 val
= dc
->def
->nwindows
- 1; /* [4:0] NWIN */
3242 val
|= 1 << 8; /* [8] V8 */
3244 return tcg_constant_tl(val
);
3247 TRANS(RDASR17
, ASR17
, do_rd_special
, true, a
->rd
, do_rd_leon3_config
)
3249 static TCGv
do_rdccr(DisasContext
*dc
, TCGv dst
)
3252 gen_helper_rdccr(dst
, tcg_env
);
3256 TRANS(RDCCR
, 64, do_rd_special
, true, a
->rd
, do_rdccr
)
3258 static TCGv
do_rdasi(DisasContext
*dc
, TCGv dst
)
3260 #ifdef TARGET_SPARC64
3261 return tcg_constant_tl(dc
->asi
);
3263 qemu_build_not_reached();
3267 TRANS(RDASI
, 64, do_rd_special
, true, a
->rd
, do_rdasi
)
3269 static TCGv
do_rdtick(DisasContext
*dc
, TCGv dst
)
3271 TCGv_ptr r_tickptr
= tcg_temp_new_ptr();
3273 tcg_gen_ld_ptr(r_tickptr
, tcg_env
, env64_field_offsetof(tick
));
3274 if (translator_io_start(&dc
->base
)) {
3275 dc
->base
.is_jmp
= DISAS_EXIT
;
3277 gen_helper_tick_get_count(dst
, tcg_env
, r_tickptr
,
3278 tcg_constant_i32(dc
->mem_idx
));
3282 /* TODO: non-priv access only allowed when enabled. */
3283 TRANS(RDTICK
, 64, do_rd_special
, true, a
->rd
, do_rdtick
)
3285 static TCGv
do_rdpc(DisasContext
*dc
, TCGv dst
)
3287 return tcg_constant_tl(address_mask_i(dc
, dc
->pc
));
3290 TRANS(RDPC
, 64, do_rd_special
, true, a
->rd
, do_rdpc
)
3292 static TCGv
do_rdfprs(DisasContext
*dc
, TCGv dst
)
3294 tcg_gen_ext_i32_tl(dst
, cpu_fprs
);
3298 TRANS(RDFPRS
, 64, do_rd_special
, true, a
->rd
, do_rdfprs
)
3300 static TCGv
do_rdgsr(DisasContext
*dc
, TCGv dst
)
3302 gen_trap_ifnofpu(dc
);
3306 TRANS(RDGSR
, 64, do_rd_special
, true, a
->rd
, do_rdgsr
)
3308 static TCGv
do_rdsoftint(DisasContext
*dc
, TCGv dst
)
3310 tcg_gen_ld32s_tl(dst
, tcg_env
, env64_field_offsetof(softint
));
3314 TRANS(RDSOFTINT
, 64, do_rd_special
, supervisor(dc
), a
->rd
, do_rdsoftint
)
3316 static TCGv
do_rdtick_cmpr(DisasContext
*dc
, TCGv dst
)
3318 tcg_gen_ld_tl(dst
, tcg_env
, env64_field_offsetof(tick_cmpr
));
3322 /* TODO: non-priv access only allowed when enabled. */
3323 TRANS(RDTICK_CMPR
, 64, do_rd_special
, true, a
->rd
, do_rdtick_cmpr
)
3325 static TCGv
do_rdstick(DisasContext
*dc
, TCGv dst
)
3327 TCGv_ptr r_tickptr
= tcg_temp_new_ptr();
3329 tcg_gen_ld_ptr(r_tickptr
, tcg_env
, env64_field_offsetof(stick
));
3330 if (translator_io_start(&dc
->base
)) {
3331 dc
->base
.is_jmp
= DISAS_EXIT
;
3333 gen_helper_tick_get_count(dst
, tcg_env
, r_tickptr
,
3334 tcg_constant_i32(dc
->mem_idx
));
3338 /* TODO: non-priv access only allowed when enabled. */
3339 TRANS(RDSTICK
, 64, do_rd_special
, true, a
->rd
, do_rdstick
)
3341 static TCGv
do_rdstick_cmpr(DisasContext
*dc
, TCGv dst
)
3343 tcg_gen_ld_tl(dst
, tcg_env
, env64_field_offsetof(stick_cmpr
));
3347 /* TODO: supervisor access only allowed when enabled by hypervisor. */
3348 TRANS(RDSTICK_CMPR
, 64, do_rd_special
, supervisor(dc
), a
->rd
, do_rdstick_cmpr
)
3351 * UltraSPARC-T1 Strand status.
3352 * HYPV check maybe not enough, UA2005 & UA2007 describe
3353 * this ASR as impl. dep
3355 static TCGv
do_rdstrand_status(DisasContext
*dc
, TCGv dst
)
3357 return tcg_constant_tl(1);
3360 TRANS(RDSTRAND_STATUS
, HYPV
, do_rd_special
, true, a
->rd
, do_rdstrand_status
)
3362 static TCGv
do_rdpsr(DisasContext
*dc
, TCGv dst
)
3365 gen_helper_rdpsr(dst
, tcg_env
);
3369 TRANS(RDPSR
, 32, do_rd_special
, supervisor(dc
), a
->rd
, do_rdpsr
)
3371 static TCGv
do_rdhpstate(DisasContext
*dc
, TCGv dst
)
3373 tcg_gen_ld_tl(dst
, tcg_env
, env64_field_offsetof(hpstate
));
3377 TRANS(RDHPR_hpstate
, HYPV
, do_rd_special
, hypervisor(dc
), a
->rd
, do_rdhpstate
)
3379 static TCGv
do_rdhtstate(DisasContext
*dc
, TCGv dst
)
3381 TCGv_i32 tl
= tcg_temp_new_i32();
3382 TCGv_ptr tp
= tcg_temp_new_ptr();
3384 tcg_gen_ld_i32(tl
, tcg_env
, env64_field_offsetof(tl
));
3385 tcg_gen_andi_i32(tl
, tl
, MAXTL_MASK
);
3386 tcg_gen_shli_i32(tl
, tl
, 3);
3387 tcg_gen_ext_i32_ptr(tp
, tl
);
3388 tcg_gen_add_ptr(tp
, tp
, tcg_env
);
3390 tcg_gen_ld_tl(dst
, tp
, env64_field_offsetof(htstate
));
3394 TRANS(RDHPR_htstate
, HYPV
, do_rd_special
, hypervisor(dc
), a
->rd
, do_rdhtstate
)
3396 static TCGv
do_rdhintp(DisasContext
*dc
, TCGv dst
)
3398 tcg_gen_ld_tl(dst
, tcg_env
, env64_field_offsetof(hintp
));
3402 TRANS(RDHPR_hintp
, HYPV
, do_rd_special
, hypervisor(dc
), a
->rd
, do_rdhintp
)
3404 static TCGv
do_rdhtba(DisasContext
*dc
, TCGv dst
)
3406 tcg_gen_ld_tl(dst
, tcg_env
, env64_field_offsetof(htba
));
3410 TRANS(RDHPR_htba
, HYPV
, do_rd_special
, hypervisor(dc
), a
->rd
, do_rdhtba
)
3412 static TCGv
do_rdhver(DisasContext
*dc
, TCGv dst
)
3414 tcg_gen_ld_tl(dst
, tcg_env
, env64_field_offsetof(hver
));
3418 TRANS(RDHPR_hver
, HYPV
, do_rd_special
, hypervisor(dc
), a
->rd
, do_rdhver
)
3420 static TCGv
do_rdhstick_cmpr(DisasContext
*dc
, TCGv dst
)
3422 tcg_gen_ld_tl(dst
, tcg_env
, env64_field_offsetof(hstick_cmpr
));
3426 TRANS(RDHPR_hstick_cmpr
, HYPV
, do_rd_special
, hypervisor(dc
), a
->rd
,
3429 static TCGv
do_rdwim(DisasContext
*dc
, TCGv dst
)
3431 tcg_gen_ld_tl(dst
, tcg_env
, env32_field_offsetof(wim
));
3435 TRANS(RDWIM
, 32, do_rd_special
, supervisor(dc
), a
->rd
, do_rdwim
)
3437 static TCGv
do_rdtpc(DisasContext
*dc
, TCGv dst
)
3439 #ifdef TARGET_SPARC64
3440 TCGv_ptr r_tsptr
= tcg_temp_new_ptr();
3442 gen_load_trap_state_at_tl(r_tsptr
);
3443 tcg_gen_ld_tl(dst
, r_tsptr
, offsetof(trap_state
, tpc
));
3446 qemu_build_not_reached();
3450 TRANS(RDPR_tpc
, 64, do_rd_special
, supervisor(dc
), a
->rd
, do_rdtpc
)
3452 static TCGv
do_rdtnpc(DisasContext
*dc
, TCGv dst
)
3454 #ifdef TARGET_SPARC64
3455 TCGv_ptr r_tsptr
= tcg_temp_new_ptr();
3457 gen_load_trap_state_at_tl(r_tsptr
);
3458 tcg_gen_ld_tl(dst
, r_tsptr
, offsetof(trap_state
, tnpc
));
3461 qemu_build_not_reached();
3465 TRANS(RDPR_tnpc
, 64, do_rd_special
, supervisor(dc
), a
->rd
, do_rdtnpc
)
3467 static TCGv
do_rdtstate(DisasContext
*dc
, TCGv dst
)
3469 #ifdef TARGET_SPARC64
3470 TCGv_ptr r_tsptr
= tcg_temp_new_ptr();
3472 gen_load_trap_state_at_tl(r_tsptr
);
3473 tcg_gen_ld_tl(dst
, r_tsptr
, offsetof(trap_state
, tstate
));
3476 qemu_build_not_reached();
3480 TRANS(RDPR_tstate
, 64, do_rd_special
, supervisor(dc
), a
->rd
, do_rdtstate
)
3482 static TCGv
do_rdtt(DisasContext
*dc
, TCGv dst
)
3484 #ifdef TARGET_SPARC64
3485 TCGv_ptr r_tsptr
= tcg_temp_new_ptr();
3487 gen_load_trap_state_at_tl(r_tsptr
);
3488 tcg_gen_ld32s_tl(dst
, r_tsptr
, offsetof(trap_state
, tt
));
3491 qemu_build_not_reached();
3495 TRANS(RDPR_tt
, 64, do_rd_special
, supervisor(dc
), a
->rd
, do_rdtt
)
3496 TRANS(RDPR_tick
, 64, do_rd_special
, supervisor(dc
), a
->rd
, do_rdtick
)
3498 static TCGv
do_rdtba(DisasContext
*dc
, TCGv dst
)
3503 TRANS(RDTBR
, 32, do_rd_special
, supervisor(dc
), a
->rd
, do_rdtba
)
3504 TRANS(RDPR_tba
, 64, do_rd_special
, supervisor(dc
), a
->rd
, do_rdtba
)
3506 static TCGv
do_rdpstate(DisasContext
*dc
, TCGv dst
)
3508 tcg_gen_ld32s_tl(dst
, tcg_env
, env64_field_offsetof(pstate
));
3512 TRANS(RDPR_pstate
, 64, do_rd_special
, supervisor(dc
), a
->rd
, do_rdpstate
)
3514 static TCGv
do_rdtl(DisasContext
*dc
, TCGv dst
)
3516 tcg_gen_ld32s_tl(dst
, tcg_env
, env64_field_offsetof(tl
));
3520 TRANS(RDPR_tl
, 64, do_rd_special
, supervisor(dc
), a
->rd
, do_rdtl
)
3522 static TCGv
do_rdpil(DisasContext
*dc
, TCGv dst
)
3524 tcg_gen_ld32s_tl(dst
, tcg_env
, env_field_offsetof(psrpil
));
3528 TRANS(RDPR_pil
, 64, do_rd_special
, supervisor(dc
), a
->rd
, do_rdpil
)
3530 static TCGv
do_rdcwp(DisasContext
*dc
, TCGv dst
)
3532 gen_helper_rdcwp(dst
, tcg_env
);
3536 TRANS(RDPR_cwp
, 64, do_rd_special
, supervisor(dc
), a
->rd
, do_rdcwp
)
3538 static TCGv
do_rdcansave(DisasContext
*dc
, TCGv dst
)
3540 tcg_gen_ld32s_tl(dst
, tcg_env
, env64_field_offsetof(cansave
));
3544 TRANS(RDPR_cansave
, 64, do_rd_special
, supervisor(dc
), a
->rd
, do_rdcansave
)
3546 static TCGv
do_rdcanrestore(DisasContext
*dc
, TCGv dst
)
3548 tcg_gen_ld32s_tl(dst
, tcg_env
, env64_field_offsetof(canrestore
));
3552 TRANS(RDPR_canrestore
, 64, do_rd_special
, supervisor(dc
), a
->rd
,
3555 static TCGv
do_rdcleanwin(DisasContext
*dc
, TCGv dst
)
3557 tcg_gen_ld32s_tl(dst
, tcg_env
, env64_field_offsetof(cleanwin
));
3561 TRANS(RDPR_cleanwin
, 64, do_rd_special
, supervisor(dc
), a
->rd
, do_rdcleanwin
)
3563 static TCGv
do_rdotherwin(DisasContext
*dc
, TCGv dst
)
3565 tcg_gen_ld32s_tl(dst
, tcg_env
, env64_field_offsetof(otherwin
));
3569 TRANS(RDPR_otherwin
, 64, do_rd_special
, supervisor(dc
), a
->rd
, do_rdotherwin
)
3571 static TCGv
do_rdwstate(DisasContext
*dc
, TCGv dst
)
3573 tcg_gen_ld32s_tl(dst
, tcg_env
, env64_field_offsetof(wstate
));
3577 TRANS(RDPR_wstate
, 64, do_rd_special
, supervisor(dc
), a
->rd
, do_rdwstate
)
3579 static TCGv
do_rdgl(DisasContext
*dc
, TCGv dst
)
3581 tcg_gen_ld32s_tl(dst
, tcg_env
, env64_field_offsetof(gl
));
3585 TRANS(RDPR_gl
, GL
, do_rd_special
, supervisor(dc
), a
->rd
, do_rdgl
)
3587 /* UA2005 strand status */
3588 static TCGv
do_rdssr(DisasContext
*dc
, TCGv dst
)
3590 tcg_gen_ld_tl(dst
, tcg_env
, env64_field_offsetof(ssr
));
3594 TRANS(RDPR_strand_status
, HYPV
, do_rd_special
, hypervisor(dc
), a
->rd
, do_rdssr
)
3596 static TCGv
do_rdver(DisasContext
*dc
, TCGv dst
)
3598 tcg_gen_ld_tl(dst
, tcg_env
, env64_field_offsetof(version
));
3602 TRANS(RDPR_ver
, 64, do_rd_special
, supervisor(dc
), a
->rd
, do_rdver
)
3604 static bool trans_FLUSHW(DisasContext
*dc
, arg_FLUSHW
*a
)
3607 gen_helper_flushw(tcg_env
);
3608 return advance_pc(dc
);
3613 static bool do_wr_special(DisasContext
*dc
, arg_r_r_ri
*a
, bool priv
,
3614 void (*func
)(DisasContext
*, TCGv
))
3618 /* For simplicity, we under-decoded the rs2 form. */
3619 if (!a
->imm
&& (a
->rs2_or_imm
& ~0x1f)) {
3623 return raise_priv(dc
);
3626 if (a
->rs1
== 0 && (a
->imm
|| a
->rs2_or_imm
== 0)) {
3627 src
= tcg_constant_tl(a
->rs2_or_imm
);
3629 TCGv src1
= gen_load_gpr(dc
, a
->rs1
);
3630 if (a
->rs2_or_imm
== 0) {
3633 src
= tcg_temp_new();
3635 tcg_gen_xori_tl(src
, src1
, a
->rs2_or_imm
);
3637 tcg_gen_xor_tl(src
, src1
, gen_load_gpr(dc
, a
->rs2_or_imm
));
3642 return advance_pc(dc
);
3645 static void do_wry(DisasContext
*dc
, TCGv src
)
3647 tcg_gen_ext32u_tl(cpu_y
, src
);
3650 TRANS(WRY
, ALL
, do_wr_special
, a
, true, do_wry
)
3652 static void do_wrccr(DisasContext
*dc
, TCGv src
)
3654 gen_helper_wrccr(tcg_env
, src
);
3657 TRANS(WRCCR
, 64, do_wr_special
, a
, true, do_wrccr
)
3659 static void do_wrasi(DisasContext
*dc
, TCGv src
)
3661 TCGv tmp
= tcg_temp_new();
3663 tcg_gen_ext8u_tl(tmp
, src
);
3664 tcg_gen_st32_tl(tmp
, tcg_env
, env64_field_offsetof(asi
));
3665 /* End TB to notice changed ASI. */
3666 dc
->base
.is_jmp
= DISAS_EXIT
;
3669 TRANS(WRASI
, 64, do_wr_special
, a
, true, do_wrasi
)
3671 static void do_wrfprs(DisasContext
*dc
, TCGv src
)
3673 #ifdef TARGET_SPARC64
3674 tcg_gen_trunc_tl_i32(cpu_fprs
, src
);
3676 dc
->base
.is_jmp
= DISAS_EXIT
;
3678 qemu_build_not_reached();
3682 TRANS(WRFPRS
, 64, do_wr_special
, a
, true, do_wrfprs
)
3684 static void do_wrgsr(DisasContext
*dc
, TCGv src
)
3686 gen_trap_ifnofpu(dc
);
3687 tcg_gen_mov_tl(cpu_gsr
, src
);
3690 TRANS(WRGSR
, 64, do_wr_special
, a
, true, do_wrgsr
)
3692 static void do_wrsoftint_set(DisasContext
*dc
, TCGv src
)
3694 gen_helper_set_softint(tcg_env
, src
);
3697 TRANS(WRSOFTINT_SET
, 64, do_wr_special
, a
, supervisor(dc
), do_wrsoftint_set
)
3699 static void do_wrsoftint_clr(DisasContext
*dc
, TCGv src
)
3701 gen_helper_clear_softint(tcg_env
, src
);
3704 TRANS(WRSOFTINT_CLR
, 64, do_wr_special
, a
, supervisor(dc
), do_wrsoftint_clr
)
3706 static void do_wrsoftint(DisasContext
*dc
, TCGv src
)
3708 gen_helper_write_softint(tcg_env
, src
);
3711 TRANS(WRSOFTINT
, 64, do_wr_special
, a
, supervisor(dc
), do_wrsoftint
)
3713 static void do_wrtick_cmpr(DisasContext
*dc
, TCGv src
)
3715 TCGv_ptr r_tickptr
= tcg_temp_new_ptr();
3717 tcg_gen_st_tl(src
, tcg_env
, env64_field_offsetof(tick_cmpr
));
3718 tcg_gen_ld_ptr(r_tickptr
, tcg_env
, env64_field_offsetof(tick
));
3719 translator_io_start(&dc
->base
);
3720 gen_helper_tick_set_limit(r_tickptr
, src
);
3721 /* End TB to handle timer interrupt */
3722 dc
->base
.is_jmp
= DISAS_EXIT
;
3725 TRANS(WRTICK_CMPR
, 64, do_wr_special
, a
, supervisor(dc
), do_wrtick_cmpr
)
3727 static void do_wrstick(DisasContext
*dc
, TCGv src
)
3729 #ifdef TARGET_SPARC64
3730 TCGv_ptr r_tickptr
= tcg_temp_new_ptr();
3732 tcg_gen_ld_ptr(r_tickptr
, tcg_env
, offsetof(CPUSPARCState
, stick
));
3733 translator_io_start(&dc
->base
);
3734 gen_helper_tick_set_count(r_tickptr
, src
);
3735 /* End TB to handle timer interrupt */
3736 dc
->base
.is_jmp
= DISAS_EXIT
;
3738 qemu_build_not_reached();
3742 TRANS(WRSTICK
, 64, do_wr_special
, a
, supervisor(dc
), do_wrstick
)
3744 static void do_wrstick_cmpr(DisasContext
*dc
, TCGv src
)
3746 TCGv_ptr r_tickptr
= tcg_temp_new_ptr();
3748 tcg_gen_st_tl(src
, tcg_env
, env64_field_offsetof(stick_cmpr
));
3749 tcg_gen_ld_ptr(r_tickptr
, tcg_env
, env64_field_offsetof(stick
));
3750 translator_io_start(&dc
->base
);
3751 gen_helper_tick_set_limit(r_tickptr
, src
);
3752 /* End TB to handle timer interrupt */
3753 dc
->base
.is_jmp
= DISAS_EXIT
;
3756 TRANS(WRSTICK_CMPR
, 64, do_wr_special
, a
, supervisor(dc
), do_wrstick_cmpr
)
3758 static void do_wrpowerdown(DisasContext
*dc
, TCGv src
)
3761 gen_helper_power_down(tcg_env
);
3764 TRANS(WRPOWERDOWN
, POWERDOWN
, do_wr_special
, a
, supervisor(dc
), do_wrpowerdown
)
3766 static void do_wrpsr(DisasContext
*dc
, TCGv src
)
3768 gen_helper_wrpsr(tcg_env
, src
);
3769 tcg_gen_movi_i32(cpu_cc_op
, CC_OP_FLAGS
);
3770 dc
->cc_op
= CC_OP_FLAGS
;
3771 dc
->base
.is_jmp
= DISAS_EXIT
;
3774 TRANS(WRPSR
, 32, do_wr_special
, a
, supervisor(dc
), do_wrpsr
)
3776 static void do_wrwim(DisasContext
*dc
, TCGv src
)
3778 target_ulong mask
= MAKE_64BIT_MASK(0, dc
->def
->nwindows
);
3779 TCGv tmp
= tcg_temp_new();
3781 tcg_gen_andi_tl(tmp
, src
, mask
);
3782 tcg_gen_st_tl(tmp
, tcg_env
, env32_field_offsetof(wim
));
3785 TRANS(WRWIM
, 32, do_wr_special
, a
, supervisor(dc
), do_wrwim
)
3787 static void do_wrtpc(DisasContext
*dc
, TCGv src
)
3789 #ifdef TARGET_SPARC64
3790 TCGv_ptr r_tsptr
= tcg_temp_new_ptr();
3792 gen_load_trap_state_at_tl(r_tsptr
);
3793 tcg_gen_st_tl(src
, r_tsptr
, offsetof(trap_state
, tpc
));
3795 qemu_build_not_reached();
3799 TRANS(WRPR_tpc
, 64, do_wr_special
, a
, supervisor(dc
), do_wrtpc
)
3801 static void do_wrtnpc(DisasContext
*dc
, TCGv src
)
3803 #ifdef TARGET_SPARC64
3804 TCGv_ptr r_tsptr
= tcg_temp_new_ptr();
3806 gen_load_trap_state_at_tl(r_tsptr
);
3807 tcg_gen_st_tl(src
, r_tsptr
, offsetof(trap_state
, tnpc
));
3809 qemu_build_not_reached();
3813 TRANS(WRPR_tnpc
, 64, do_wr_special
, a
, supervisor(dc
), do_wrtnpc
)
3815 static void do_wrtstate(DisasContext
*dc
, TCGv src
)
3817 #ifdef TARGET_SPARC64
3818 TCGv_ptr r_tsptr
= tcg_temp_new_ptr();
3820 gen_load_trap_state_at_tl(r_tsptr
);
3821 tcg_gen_st_tl(src
, r_tsptr
, offsetof(trap_state
, tstate
));
3823 qemu_build_not_reached();
3827 TRANS(WRPR_tstate
, 64, do_wr_special
, a
, supervisor(dc
), do_wrtstate
)
3829 static void do_wrtt(DisasContext
*dc
, TCGv src
)
3831 #ifdef TARGET_SPARC64
3832 TCGv_ptr r_tsptr
= tcg_temp_new_ptr();
3834 gen_load_trap_state_at_tl(r_tsptr
);
3835 tcg_gen_st32_tl(src
, r_tsptr
, offsetof(trap_state
, tt
));
3837 qemu_build_not_reached();
3841 TRANS(WRPR_tt
, 64, do_wr_special
, a
, supervisor(dc
), do_wrtt
)
3843 static void do_wrtick(DisasContext
*dc
, TCGv src
)
3845 TCGv_ptr r_tickptr
= tcg_temp_new_ptr();
3847 tcg_gen_ld_ptr(r_tickptr
, tcg_env
, env64_field_offsetof(tick
));
3848 translator_io_start(&dc
->base
);
3849 gen_helper_tick_set_count(r_tickptr
, src
);
3850 /* End TB to handle timer interrupt */
3851 dc
->base
.is_jmp
= DISAS_EXIT
;
3854 TRANS(WRPR_tick
, 64, do_wr_special
, a
, supervisor(dc
), do_wrtick
)
3856 static void do_wrtba(DisasContext
*dc
, TCGv src
)
3858 tcg_gen_mov_tl(cpu_tbr
, src
);
3861 TRANS(WRPR_tba
, 64, do_wr_special
, a
, supervisor(dc
), do_wrtba
)
3863 static void do_wrpstate(DisasContext
*dc
, TCGv src
)
3866 if (translator_io_start(&dc
->base
)) {
3867 dc
->base
.is_jmp
= DISAS_EXIT
;
3869 gen_helper_wrpstate(tcg_env
, src
);
3870 dc
->npc
= DYNAMIC_PC
;
3873 TRANS(WRPR_pstate
, 64, do_wr_special
, a
, supervisor(dc
), do_wrpstate
)
3875 static void do_wrtl(DisasContext
*dc
, TCGv src
)
3878 tcg_gen_st32_tl(src
, tcg_env
, env64_field_offsetof(tl
));
3879 dc
->npc
= DYNAMIC_PC
;
3882 TRANS(WRPR_tl
, 64, do_wr_special
, a
, supervisor(dc
), do_wrtl
)
3884 static void do_wrpil(DisasContext
*dc
, TCGv src
)
3886 if (translator_io_start(&dc
->base
)) {
3887 dc
->base
.is_jmp
= DISAS_EXIT
;
3889 gen_helper_wrpil(tcg_env
, src
);
3892 TRANS(WRPR_pil
, 64, do_wr_special
, a
, supervisor(dc
), do_wrpil
)
3894 static void do_wrcwp(DisasContext
*dc
, TCGv src
)
3896 gen_helper_wrcwp(tcg_env
, src
);
3899 TRANS(WRPR_cwp
, 64, do_wr_special
, a
, supervisor(dc
), do_wrcwp
)
3901 static void do_wrcansave(DisasContext
*dc
, TCGv src
)
3903 tcg_gen_st32_tl(src
, tcg_env
, env64_field_offsetof(cansave
));
3906 TRANS(WRPR_cansave
, 64, do_wr_special
, a
, supervisor(dc
), do_wrcansave
)
3908 static void do_wrcanrestore(DisasContext
*dc
, TCGv src
)
3910 tcg_gen_st32_tl(src
, tcg_env
, env64_field_offsetof(canrestore
));
3913 TRANS(WRPR_canrestore
, 64, do_wr_special
, a
, supervisor(dc
), do_wrcanrestore
)
3915 static void do_wrcleanwin(DisasContext
*dc
, TCGv src
)
3917 tcg_gen_st32_tl(src
, tcg_env
, env64_field_offsetof(cleanwin
));
3920 TRANS(WRPR_cleanwin
, 64, do_wr_special
, a
, supervisor(dc
), do_wrcleanwin
)
3922 static void do_wrotherwin(DisasContext
*dc
, TCGv src
)
3924 tcg_gen_st32_tl(src
, tcg_env
, env64_field_offsetof(otherwin
));
3927 TRANS(WRPR_otherwin
, 64, do_wr_special
, a
, supervisor(dc
), do_wrotherwin
)
3929 static void do_wrwstate(DisasContext
*dc
, TCGv src
)
3931 tcg_gen_st32_tl(src
, tcg_env
, env64_field_offsetof(wstate
));
3934 TRANS(WRPR_wstate
, 64, do_wr_special
, a
, supervisor(dc
), do_wrwstate
)
3936 static void do_wrgl(DisasContext
*dc
, TCGv src
)
3938 gen_helper_wrgl(tcg_env
, src
);
3941 TRANS(WRPR_gl
, GL
, do_wr_special
, a
, supervisor(dc
), do_wrgl
)
3943 /* UA2005 strand status */
3944 static void do_wrssr(DisasContext
*dc
, TCGv src
)
3946 tcg_gen_st_tl(src
, tcg_env
, env64_field_offsetof(ssr
));
3949 TRANS(WRPR_strand_status
, HYPV
, do_wr_special
, a
, hypervisor(dc
), do_wrssr
)
3951 TRANS(WRTBR
, 32, do_wr_special
, a
, supervisor(dc
), do_wrtba
)
3953 static void do_wrhpstate(DisasContext
*dc
, TCGv src
)
3955 tcg_gen_st_tl(src
, tcg_env
, env64_field_offsetof(hpstate
));
3956 dc
->base
.is_jmp
= DISAS_EXIT
;
3959 TRANS(WRHPR_hpstate
, HYPV
, do_wr_special
, a
, hypervisor(dc
), do_wrhpstate
)
3961 static void do_wrhtstate(DisasContext
*dc
, TCGv src
)
3963 TCGv_i32 tl
= tcg_temp_new_i32();
3964 TCGv_ptr tp
= tcg_temp_new_ptr();
3966 tcg_gen_ld_i32(tl
, tcg_env
, env64_field_offsetof(tl
));
3967 tcg_gen_andi_i32(tl
, tl
, MAXTL_MASK
);
3968 tcg_gen_shli_i32(tl
, tl
, 3);
3969 tcg_gen_ext_i32_ptr(tp
, tl
);
3970 tcg_gen_add_ptr(tp
, tp
, tcg_env
);
3972 tcg_gen_st_tl(src
, tp
, env64_field_offsetof(htstate
));
3975 TRANS(WRHPR_htstate
, HYPV
, do_wr_special
, a
, hypervisor(dc
), do_wrhtstate
)
3977 static void do_wrhintp(DisasContext
*dc
, TCGv src
)
3979 tcg_gen_st_tl(src
, tcg_env
, env64_field_offsetof(hintp
));
3982 TRANS(WRHPR_hintp
, HYPV
, do_wr_special
, a
, hypervisor(dc
), do_wrhintp
)
3984 static void do_wrhtba(DisasContext
*dc
, TCGv src
)
3986 tcg_gen_st_tl(src
, tcg_env
, env64_field_offsetof(htba
));
3989 TRANS(WRHPR_htba
, HYPV
, do_wr_special
, a
, hypervisor(dc
), do_wrhtba
)
3991 static void do_wrhstick_cmpr(DisasContext
*dc
, TCGv src
)
3993 TCGv_ptr r_tickptr
= tcg_temp_new_ptr();
3995 tcg_gen_st_tl(src
, tcg_env
, env64_field_offsetof(hstick_cmpr
));
3996 tcg_gen_ld_ptr(r_tickptr
, tcg_env
, env64_field_offsetof(hstick
));
3997 translator_io_start(&dc
->base
);
3998 gen_helper_tick_set_limit(r_tickptr
, src
);
3999 /* End TB to handle timer interrupt */
4000 dc
->base
.is_jmp
= DISAS_EXIT
;
4003 TRANS(WRHPR_hstick_cmpr
, HYPV
, do_wr_special
, a
, hypervisor(dc
),
4006 static bool do_saved_restored(DisasContext
*dc
, bool saved
)
4008 if (!supervisor(dc
)) {
4009 return raise_priv(dc
);
4012 gen_helper_saved(tcg_env
);
4014 gen_helper_restored(tcg_env
);
4016 return advance_pc(dc
);
4019 TRANS(SAVED
, 64, do_saved_restored
, true)
4020 TRANS(RESTORED
, 64, do_saved_restored
, false)
4022 static bool trans_NOP_v7(DisasContext
*dc
, arg_NOP_v7
*a
)
4025 * TODO: Need a feature bit for sparcv8.
4026 * In the meantime, treat all 32-bit cpus like sparcv7.
4029 return advance_pc(dc
);
4034 static bool do_arith_int(DisasContext
*dc
, arg_r_r_ri_cc
*a
, int cc_op
,
4035 void (*func
)(TCGv
, TCGv
, TCGv
),
4036 void (*funci
)(TCGv
, TCGv
, target_long
))
4040 /* For simplicity, we under-decoded the rs2 form. */
4041 if (!a
->imm
&& a
->rs2_or_imm
& ~0x1f) {
4048 dst
= gen_dest_gpr(dc
, a
->rd
);
4050 src1
= gen_load_gpr(dc
, a
->rs1
);
4052 if (a
->imm
|| a
->rs2_or_imm
== 0) {
4054 funci(dst
, src1
, a
->rs2_or_imm
);
4056 func(dst
, src1
, tcg_constant_tl(a
->rs2_or_imm
));
4059 func(dst
, src1
, cpu_regs
[a
->rs2_or_imm
]);
4061 gen_store_gpr(dc
, a
->rd
, dst
);
4064 tcg_gen_movi_i32(cpu_cc_op
, cc_op
);
4067 return advance_pc(dc
);
4070 static bool do_arith(DisasContext
*dc
, arg_r_r_ri_cc
*a
, int cc_op
,
4071 void (*func
)(TCGv
, TCGv
, TCGv
),
4072 void (*funci
)(TCGv
, TCGv
, target_long
),
4073 void (*func_cc
)(TCGv
, TCGv
, TCGv
))
4076 return do_arith_int(dc
, a
, cc_op
, func_cc
, NULL
);
4078 return do_arith_int(dc
, a
, cc_op
, func
, funci
);
4081 static bool do_logic(DisasContext
*dc
, arg_r_r_ri_cc
*a
,
4082 void (*func
)(TCGv
, TCGv
, TCGv
),
4083 void (*funci
)(TCGv
, TCGv
, target_long
))
4085 return do_arith_int(dc
, a
, CC_OP_LOGIC
, func
, funci
);
4088 TRANS(ADD
, ALL
, do_arith
, a
, CC_OP_ADD
,
4089 tcg_gen_add_tl
, tcg_gen_addi_tl
, gen_op_add_cc
)
4090 TRANS(SUB
, ALL
, do_arith
, a
, CC_OP_SUB
,
4091 tcg_gen_sub_tl
, tcg_gen_subi_tl
, gen_op_sub_cc
)
4093 TRANS(AND
, ALL
, do_logic
, a
, tcg_gen_and_tl
, tcg_gen_andi_tl
)
4094 TRANS(XOR
, ALL
, do_logic
, a
, tcg_gen_xor_tl
, tcg_gen_xori_tl
)
4095 TRANS(ANDN
, ALL
, do_logic
, a
, tcg_gen_andc_tl
, NULL
)
4096 TRANS(ORN
, ALL
, do_logic
, a
, tcg_gen_orc_tl
, NULL
)
4097 TRANS(XORN
, ALL
, do_logic
, a
, tcg_gen_eqv_tl
, NULL
)
4099 static bool trans_OR(DisasContext
*dc
, arg_r_r_ri_cc
*a
)
4101 /* OR with %g0 is the canonical alias for MOV. */
4102 if (!a
->cc
&& a
->rs1
== 0) {
4103 if (a
->imm
|| a
->rs2_or_imm
== 0) {
4104 gen_store_gpr(dc
, a
->rd
, tcg_constant_tl(a
->rs2_or_imm
));
4105 } else if (a
->rs2_or_imm
& ~0x1f) {
4106 /* For simplicity, we under-decoded the rs2 form. */
4109 gen_store_gpr(dc
, a
->rd
, cpu_regs
[a
->rs2_or_imm
]);
4111 return advance_pc(dc
);
4113 return do_logic(dc
, a
, tcg_gen_or_tl
, tcg_gen_ori_tl
);
4116 static bool trans_ADDC(DisasContext
*dc
, arg_r_r_ri_cc
*a
)
4118 switch (dc
->cc_op
) {
4121 /* Carry is known to be zero. Fall back to plain ADD. */
4122 return do_arith(dc
, a
, CC_OP_ADD
,
4123 tcg_gen_add_tl
, tcg_gen_addi_tl
, gen_op_add_cc
);
4127 return do_arith(dc
, a
, CC_OP_ADDX
,
4128 gen_op_addc_add
, NULL
, gen_op_addccc_add
);
4132 return do_arith(dc
, a
, CC_OP_ADDX
,
4133 gen_op_addc_sub
, NULL
, gen_op_addccc_sub
);
4135 return do_arith(dc
, a
, CC_OP_ADDX
,
4136 gen_op_addc_generic
, NULL
, gen_op_addccc_generic
);
4140 #define CHECK_IU_FEATURE(dc, FEATURE) \
4141 if (!((dc)->def->features & CPU_FEATURE_ ## FEATURE)) \
4143 #define CHECK_FPU_FEATURE(dc, FEATURE) \
4144 if (!((dc)->def->features & CPU_FEATURE_ ## FEATURE)) \
4147 /* before an instruction, dc->pc must be static */
4148 static void disas_sparc_legacy(DisasContext
*dc
, unsigned int insn
)
4150 unsigned int opc
, rs1
, rs2
, rd
;
4151 TCGv cpu_src1
, cpu_src2
;
4152 TCGv_i32 cpu_src1_32
, cpu_src2_32
, cpu_dst_32
;
4153 TCGv_i64 cpu_src1_64
, cpu_src2_64
, cpu_dst_64
;
4156 opc
= GET_FIELD(insn
, 0, 1);
4157 rd
= GET_FIELD(insn
, 2, 6);
4161 goto illegal_insn
; /* in decodetree */
4163 g_assert_not_reached(); /* in decodetree */
4164 case 2: /* FPU & Logical Operations */
4166 unsigned int xop
__attribute__((unused
)) = GET_FIELD(insn
, 7, 12);
4167 TCGv cpu_dst
__attribute__((unused
)) = tcg_temp_new();
4168 TCGv cpu_tmp0
__attribute__((unused
));
4170 if (xop
== 0x34) { /* FPU Operations */
4171 if (gen_trap_ifnofpu(dc
)) {
4174 gen_op_clear_ieee_excp_and_FTT();
4175 rs1
= GET_FIELD(insn
, 13, 17);
4176 rs2
= GET_FIELD(insn
, 27, 31);
4177 xop
= GET_FIELD(insn
, 18, 26);
4180 case 0x1: /* fmovs */
4181 cpu_src1_32
= gen_load_fpr_F(dc
, rs2
);
4182 gen_store_fpr_F(dc
, rd
, cpu_src1_32
);
4184 case 0x5: /* fnegs */
4185 gen_ne_fop_FF(dc
, rd
, rs2
, gen_helper_fnegs
);
4187 case 0x9: /* fabss */
4188 gen_ne_fop_FF(dc
, rd
, rs2
, gen_helper_fabss
);
4190 case 0x29: /* fsqrts */
4191 gen_fop_FF(dc
, rd
, rs2
, gen_helper_fsqrts
);
4193 case 0x2a: /* fsqrtd */
4194 gen_fop_DD(dc
, rd
, rs2
, gen_helper_fsqrtd
);
4196 case 0x2b: /* fsqrtq */
4197 CHECK_FPU_FEATURE(dc
, FLOAT128
);
4198 gen_fop_QQ(dc
, rd
, rs2
, gen_helper_fsqrtq
);
4200 case 0x41: /* fadds */
4201 gen_fop_FFF(dc
, rd
, rs1
, rs2
, gen_helper_fadds
);
4203 case 0x42: /* faddd */
4204 gen_fop_DDD(dc
, rd
, rs1
, rs2
, gen_helper_faddd
);
4206 case 0x43: /* faddq */
4207 CHECK_FPU_FEATURE(dc
, FLOAT128
);
4208 gen_fop_QQQ(dc
, rd
, rs1
, rs2
, gen_helper_faddq
);
4210 case 0x45: /* fsubs */
4211 gen_fop_FFF(dc
, rd
, rs1
, rs2
, gen_helper_fsubs
);
4213 case 0x46: /* fsubd */
4214 gen_fop_DDD(dc
, rd
, rs1
, rs2
, gen_helper_fsubd
);
4216 case 0x47: /* fsubq */
4217 CHECK_FPU_FEATURE(dc
, FLOAT128
);
4218 gen_fop_QQQ(dc
, rd
, rs1
, rs2
, gen_helper_fsubq
);
4220 case 0x49: /* fmuls */
4221 gen_fop_FFF(dc
, rd
, rs1
, rs2
, gen_helper_fmuls
);
4223 case 0x4a: /* fmuld */
4224 gen_fop_DDD(dc
, rd
, rs1
, rs2
, gen_helper_fmuld
);
4226 case 0x4b: /* fmulq */
4227 CHECK_FPU_FEATURE(dc
, FLOAT128
);
4228 gen_fop_QQQ(dc
, rd
, rs1
, rs2
, gen_helper_fmulq
);
4230 case 0x4d: /* fdivs */
4231 gen_fop_FFF(dc
, rd
, rs1
, rs2
, gen_helper_fdivs
);
4233 case 0x4e: /* fdivd */
4234 gen_fop_DDD(dc
, rd
, rs1
, rs2
, gen_helper_fdivd
);
4236 case 0x4f: /* fdivq */
4237 CHECK_FPU_FEATURE(dc
, FLOAT128
);
4238 gen_fop_QQQ(dc
, rd
, rs1
, rs2
, gen_helper_fdivq
);
4240 case 0x69: /* fsmuld */
4241 CHECK_FPU_FEATURE(dc
, FSMULD
);
4242 gen_fop_DFF(dc
, rd
, rs1
, rs2
, gen_helper_fsmuld
);
4244 case 0x6e: /* fdmulq */
4245 CHECK_FPU_FEATURE(dc
, FLOAT128
);
4246 gen_fop_QDD(dc
, rd
, rs1
, rs2
, gen_helper_fdmulq
);
4248 case 0xc4: /* fitos */
4249 gen_fop_FF(dc
, rd
, rs2
, gen_helper_fitos
);
4251 case 0xc6: /* fdtos */
4252 gen_fop_FD(dc
, rd
, rs2
, gen_helper_fdtos
);
4254 case 0xc7: /* fqtos */
4255 CHECK_FPU_FEATURE(dc
, FLOAT128
);
4256 gen_fop_FQ(dc
, rd
, rs2
, gen_helper_fqtos
);
4258 case 0xc8: /* fitod */
4259 gen_ne_fop_DF(dc
, rd
, rs2
, gen_helper_fitod
);
4261 case 0xc9: /* fstod */
4262 gen_ne_fop_DF(dc
, rd
, rs2
, gen_helper_fstod
);
4264 case 0xcb: /* fqtod */
4265 CHECK_FPU_FEATURE(dc
, FLOAT128
);
4266 gen_fop_DQ(dc
, rd
, rs2
, gen_helper_fqtod
);
4268 case 0xcc: /* fitoq */
4269 CHECK_FPU_FEATURE(dc
, FLOAT128
);
4270 gen_ne_fop_QF(dc
, rd
, rs2
, gen_helper_fitoq
);
4272 case 0xcd: /* fstoq */
4273 CHECK_FPU_FEATURE(dc
, FLOAT128
);
4274 gen_ne_fop_QF(dc
, rd
, rs2
, gen_helper_fstoq
);
4276 case 0xce: /* fdtoq */
4277 CHECK_FPU_FEATURE(dc
, FLOAT128
);
4278 gen_ne_fop_QD(dc
, rd
, rs2
, gen_helper_fdtoq
);
4280 case 0xd1: /* fstoi */
4281 gen_fop_FF(dc
, rd
, rs2
, gen_helper_fstoi
);
4283 case 0xd2: /* fdtoi */
4284 gen_fop_FD(dc
, rd
, rs2
, gen_helper_fdtoi
);
4286 case 0xd3: /* fqtoi */
4287 CHECK_FPU_FEATURE(dc
, FLOAT128
);
4288 gen_fop_FQ(dc
, rd
, rs2
, gen_helper_fqtoi
);
4290 #ifdef TARGET_SPARC64
4291 case 0x2: /* V9 fmovd */
4292 cpu_src1_64
= gen_load_fpr_D(dc
, rs2
);
4293 gen_store_fpr_D(dc
, rd
, cpu_src1_64
);
4295 case 0x3: /* V9 fmovq */
4296 CHECK_FPU_FEATURE(dc
, FLOAT128
);
4297 gen_move_Q(dc
, rd
, rs2
);
4299 case 0x6: /* V9 fnegd */
4300 gen_ne_fop_DD(dc
, rd
, rs2
, gen_helper_fnegd
);
4302 case 0x7: /* V9 fnegq */
4303 CHECK_FPU_FEATURE(dc
, FLOAT128
);
4304 gen_ne_fop_QQ(dc
, rd
, rs2
, gen_helper_fnegq
);
4306 case 0xa: /* V9 fabsd */
4307 gen_ne_fop_DD(dc
, rd
, rs2
, gen_helper_fabsd
);
4309 case 0xb: /* V9 fabsq */
4310 CHECK_FPU_FEATURE(dc
, FLOAT128
);
4311 gen_ne_fop_QQ(dc
, rd
, rs2
, gen_helper_fabsq
);
4313 case 0x81: /* V9 fstox */
4314 gen_fop_DF(dc
, rd
, rs2
, gen_helper_fstox
);
4316 case 0x82: /* V9 fdtox */
4317 gen_fop_DD(dc
, rd
, rs2
, gen_helper_fdtox
);
4319 case 0x83: /* V9 fqtox */
4320 CHECK_FPU_FEATURE(dc
, FLOAT128
);
4321 gen_fop_DQ(dc
, rd
, rs2
, gen_helper_fqtox
);
4323 case 0x84: /* V9 fxtos */
4324 gen_fop_FD(dc
, rd
, rs2
, gen_helper_fxtos
);
4326 case 0x88: /* V9 fxtod */
4327 gen_fop_DD(dc
, rd
, rs2
, gen_helper_fxtod
);
4329 case 0x8c: /* V9 fxtoq */
4330 CHECK_FPU_FEATURE(dc
, FLOAT128
);
4331 gen_ne_fop_QD(dc
, rd
, rs2
, gen_helper_fxtoq
);
4337 } else if (xop
== 0x35) { /* FPU Operations */
4338 #ifdef TARGET_SPARC64
4341 if (gen_trap_ifnofpu(dc
)) {
4344 gen_op_clear_ieee_excp_and_FTT();
4345 rs1
= GET_FIELD(insn
, 13, 17);
4346 rs2
= GET_FIELD(insn
, 27, 31);
4347 xop
= GET_FIELD(insn
, 18, 26);
4349 #ifdef TARGET_SPARC64
4353 cond = GET_FIELD_SP(insn, 10, 12); \
4354 cpu_src1 = get_src1(dc, insn); \
4355 gen_compare_reg(&cmp, cond, cpu_src1); \
4356 gen_fmov##sz(dc, &cmp, rd, rs2); \
4359 if ((xop
& 0x11f) == 0x005) { /* V9 fmovsr */
4362 } else if ((xop
& 0x11f) == 0x006) { // V9 fmovdr
4365 } else if ((xop
& 0x11f) == 0x007) { // V9 fmovqr
4366 CHECK_FPU_FEATURE(dc
, FLOAT128
);
4373 #ifdef TARGET_SPARC64
4374 #define FMOVCC(fcc, sz) \
4377 cond = GET_FIELD_SP(insn, 14, 17); \
4378 gen_fcompare(&cmp, fcc, cond); \
4379 gen_fmov##sz(dc, &cmp, rd, rs2); \
4382 case 0x001: /* V9 fmovscc %fcc0 */
4385 case 0x002: /* V9 fmovdcc %fcc0 */
4388 case 0x003: /* V9 fmovqcc %fcc0 */
4389 CHECK_FPU_FEATURE(dc
, FLOAT128
);
4392 case 0x041: /* V9 fmovscc %fcc1 */
4395 case 0x042: /* V9 fmovdcc %fcc1 */
4398 case 0x043: /* V9 fmovqcc %fcc1 */
4399 CHECK_FPU_FEATURE(dc
, FLOAT128
);
4402 case 0x081: /* V9 fmovscc %fcc2 */
4405 case 0x082: /* V9 fmovdcc %fcc2 */
4408 case 0x083: /* V9 fmovqcc %fcc2 */
4409 CHECK_FPU_FEATURE(dc
, FLOAT128
);
4412 case 0x0c1: /* V9 fmovscc %fcc3 */
4415 case 0x0c2: /* V9 fmovdcc %fcc3 */
4418 case 0x0c3: /* V9 fmovqcc %fcc3 */
4419 CHECK_FPU_FEATURE(dc
, FLOAT128
);
4423 #define FMOVCC(xcc, sz) \
4426 cond = GET_FIELD_SP(insn, 14, 17); \
4427 gen_compare(&cmp, xcc, cond, dc); \
4428 gen_fmov##sz(dc, &cmp, rd, rs2); \
4431 case 0x101: /* V9 fmovscc %icc */
4434 case 0x102: /* V9 fmovdcc %icc */
4437 case 0x103: /* V9 fmovqcc %icc */
4438 CHECK_FPU_FEATURE(dc
, FLOAT128
);
4441 case 0x181: /* V9 fmovscc %xcc */
4444 case 0x182: /* V9 fmovdcc %xcc */
4447 case 0x183: /* V9 fmovqcc %xcc */
4448 CHECK_FPU_FEATURE(dc
, FLOAT128
);
4453 case 0x51: /* fcmps, V9 %fcc */
4454 cpu_src1_32
= gen_load_fpr_F(dc
, rs1
);
4455 cpu_src2_32
= gen_load_fpr_F(dc
, rs2
);
4456 gen_op_fcmps(rd
& 3, cpu_src1_32
, cpu_src2_32
);
4458 case 0x52: /* fcmpd, V9 %fcc */
4459 cpu_src1_64
= gen_load_fpr_D(dc
, rs1
);
4460 cpu_src2_64
= gen_load_fpr_D(dc
, rs2
);
4461 gen_op_fcmpd(rd
& 3, cpu_src1_64
, cpu_src2_64
);
4463 case 0x53: /* fcmpq, V9 %fcc */
4464 CHECK_FPU_FEATURE(dc
, FLOAT128
);
4465 gen_op_load_fpr_QT0(QFPREG(rs1
));
4466 gen_op_load_fpr_QT1(QFPREG(rs2
));
4467 gen_op_fcmpq(rd
& 3);
4469 case 0x55: /* fcmpes, V9 %fcc */
4470 cpu_src1_32
= gen_load_fpr_F(dc
, rs1
);
4471 cpu_src2_32
= gen_load_fpr_F(dc
, rs2
);
4472 gen_op_fcmpes(rd
& 3, cpu_src1_32
, cpu_src2_32
);
4474 case 0x56: /* fcmped, V9 %fcc */
4475 cpu_src1_64
= gen_load_fpr_D(dc
, rs1
);
4476 cpu_src2_64
= gen_load_fpr_D(dc
, rs2
);
4477 gen_op_fcmped(rd
& 3, cpu_src1_64
, cpu_src2_64
);
4479 case 0x57: /* fcmpeq, V9 %fcc */
4480 CHECK_FPU_FEATURE(dc
, FLOAT128
);
4481 gen_op_load_fpr_QT0(QFPREG(rs1
));
4482 gen_op_load_fpr_QT1(QFPREG(rs2
));
4483 gen_op_fcmpeq(rd
& 3);
4488 #ifdef TARGET_SPARC64
4489 } else if (xop
== 0x25) { /* sll, V9 sllx */
4490 cpu_src1
= get_src1(dc
, insn
);
4491 if (IS_IMM
) { /* immediate */
4492 simm
= GET_FIELDs(insn
, 20, 31);
4493 if (insn
& (1 << 12)) {
4494 tcg_gen_shli_i64(cpu_dst
, cpu_src1
, simm
& 0x3f);
4496 tcg_gen_shli_i64(cpu_dst
, cpu_src1
, simm
& 0x1f);
4498 } else { /* register */
4499 rs2
= GET_FIELD(insn
, 27, 31);
4500 cpu_src2
= gen_load_gpr(dc
, rs2
);
4501 cpu_tmp0
= tcg_temp_new();
4502 if (insn
& (1 << 12)) {
4503 tcg_gen_andi_i64(cpu_tmp0
, cpu_src2
, 0x3f);
4505 tcg_gen_andi_i64(cpu_tmp0
, cpu_src2
, 0x1f);
4507 tcg_gen_shl_i64(cpu_dst
, cpu_src1
, cpu_tmp0
);
4509 gen_store_gpr(dc
, rd
, cpu_dst
);
4510 } else if (xop
== 0x26) { /* srl, V9 srlx */
4511 cpu_src1
= get_src1(dc
, insn
);
4512 if (IS_IMM
) { /* immediate */
4513 simm
= GET_FIELDs(insn
, 20, 31);
4514 if (insn
& (1 << 12)) {
4515 tcg_gen_shri_i64(cpu_dst
, cpu_src1
, simm
& 0x3f);
4517 tcg_gen_andi_i64(cpu_dst
, cpu_src1
, 0xffffffffULL
);
4518 tcg_gen_shri_i64(cpu_dst
, cpu_dst
, simm
& 0x1f);
4520 } else { /* register */
4521 rs2
= GET_FIELD(insn
, 27, 31);
4522 cpu_src2
= gen_load_gpr(dc
, rs2
);
4523 cpu_tmp0
= tcg_temp_new();
4524 if (insn
& (1 << 12)) {
4525 tcg_gen_andi_i64(cpu_tmp0
, cpu_src2
, 0x3f);
4526 tcg_gen_shr_i64(cpu_dst
, cpu_src1
, cpu_tmp0
);
4528 tcg_gen_andi_i64(cpu_tmp0
, cpu_src2
, 0x1f);
4529 tcg_gen_andi_i64(cpu_dst
, cpu_src1
, 0xffffffffULL
);
4530 tcg_gen_shr_i64(cpu_dst
, cpu_dst
, cpu_tmp0
);
4533 gen_store_gpr(dc
, rd
, cpu_dst
);
4534 } else if (xop
== 0x27) { /* sra, V9 srax */
4535 cpu_src1
= get_src1(dc
, insn
);
4536 if (IS_IMM
) { /* immediate */
4537 simm
= GET_FIELDs(insn
, 20, 31);
4538 if (insn
& (1 << 12)) {
4539 tcg_gen_sari_i64(cpu_dst
, cpu_src1
, simm
& 0x3f);
4541 tcg_gen_ext32s_i64(cpu_dst
, cpu_src1
);
4542 tcg_gen_sari_i64(cpu_dst
, cpu_dst
, simm
& 0x1f);
4544 } else { /* register */
4545 rs2
= GET_FIELD(insn
, 27, 31);
4546 cpu_src2
= gen_load_gpr(dc
, rs2
);
4547 cpu_tmp0
= tcg_temp_new();
4548 if (insn
& (1 << 12)) {
4549 tcg_gen_andi_i64(cpu_tmp0
, cpu_src2
, 0x3f);
4550 tcg_gen_sar_i64(cpu_dst
, cpu_src1
, cpu_tmp0
);
4552 tcg_gen_andi_i64(cpu_tmp0
, cpu_src2
, 0x1f);
4553 tcg_gen_ext32s_i64(cpu_dst
, cpu_src1
);
4554 tcg_gen_sar_i64(cpu_dst
, cpu_dst
, cpu_tmp0
);
4557 gen_store_gpr(dc
, rd
, cpu_dst
);
4559 } else if (xop
< 0x36) {
4561 cpu_src1
= get_src1(dc
, insn
);
4562 cpu_src2
= get_src2(dc
, insn
);
4563 switch (xop
& ~0x10) {
4564 #ifdef TARGET_SPARC64
4565 case 0x9: /* V9 mulx */
4566 tcg_gen_mul_i64(cpu_dst
, cpu_src1
, cpu_src2
);
4569 case 0xa: /* umul */
4570 CHECK_IU_FEATURE(dc
, MUL
);
4571 gen_op_umul(cpu_dst
, cpu_src1
, cpu_src2
);
4573 tcg_gen_mov_tl(cpu_cc_dst
, cpu_dst
);
4574 tcg_gen_movi_i32(cpu_cc_op
, CC_OP_LOGIC
);
4575 dc
->cc_op
= CC_OP_LOGIC
;
4578 case 0xb: /* smul */
4579 CHECK_IU_FEATURE(dc
, MUL
);
4580 gen_op_smul(cpu_dst
, cpu_src1
, cpu_src2
);
4582 tcg_gen_mov_tl(cpu_cc_dst
, cpu_dst
);
4583 tcg_gen_movi_i32(cpu_cc_op
, CC_OP_LOGIC
);
4584 dc
->cc_op
= CC_OP_LOGIC
;
4587 case 0xc: /* subx, V9 subc */
4588 gen_op_subx_int(dc
, cpu_dst
, cpu_src1
, cpu_src2
,
4591 #ifdef TARGET_SPARC64
4592 case 0xd: /* V9 udivx */
4593 gen_helper_udivx(cpu_dst
, tcg_env
, cpu_src1
, cpu_src2
);
4596 case 0xe: /* udiv */
4597 CHECK_IU_FEATURE(dc
, DIV
);
4599 gen_helper_udiv_cc(cpu_dst
, tcg_env
, cpu_src1
,
4601 dc
->cc_op
= CC_OP_DIV
;
4603 gen_helper_udiv(cpu_dst
, tcg_env
, cpu_src1
,
4607 case 0xf: /* sdiv */
4608 CHECK_IU_FEATURE(dc
, DIV
);
4610 gen_helper_sdiv_cc(cpu_dst
, tcg_env
, cpu_src1
,
4612 dc
->cc_op
= CC_OP_DIV
;
4614 gen_helper_sdiv(cpu_dst
, tcg_env
, cpu_src1
,
4621 gen_store_gpr(dc
, rd
, cpu_dst
);
4623 cpu_src1
= get_src1(dc
, insn
);
4624 cpu_src2
= get_src2(dc
, insn
);
4626 case 0x20: /* taddcc */
4627 gen_op_add_cc(cpu_dst
, cpu_src1
, cpu_src2
);
4628 gen_store_gpr(dc
, rd
, cpu_dst
);
4629 tcg_gen_movi_i32(cpu_cc_op
, CC_OP_TADD
);
4630 dc
->cc_op
= CC_OP_TADD
;
4632 case 0x21: /* tsubcc */
4633 gen_op_sub_cc(cpu_dst
, cpu_src1
, cpu_src2
);
4634 gen_store_gpr(dc
, rd
, cpu_dst
);
4635 tcg_gen_movi_i32(cpu_cc_op
, CC_OP_TSUB
);
4636 dc
->cc_op
= CC_OP_TSUB
;
4638 case 0x22: /* taddcctv */
4639 gen_helper_taddcctv(cpu_dst
, tcg_env
,
4640 cpu_src1
, cpu_src2
);
4641 gen_store_gpr(dc
, rd
, cpu_dst
);
4642 dc
->cc_op
= CC_OP_TADDTV
;
4644 case 0x23: /* tsubcctv */
4645 gen_helper_tsubcctv(cpu_dst
, tcg_env
,
4646 cpu_src1
, cpu_src2
);
4647 gen_store_gpr(dc
, rd
, cpu_dst
);
4648 dc
->cc_op
= CC_OP_TSUBTV
;
4650 case 0x24: /* mulscc */
4652 gen_op_mulscc(cpu_dst
, cpu_src1
, cpu_src2
);
4653 gen_store_gpr(dc
, rd
, cpu_dst
);
4654 tcg_gen_movi_i32(cpu_cc_op
, CC_OP_ADD
);
4655 dc
->cc_op
= CC_OP_ADD
;
4657 #ifndef TARGET_SPARC64
4658 case 0x25: /* sll */
4659 if (IS_IMM
) { /* immediate */
4660 simm
= GET_FIELDs(insn
, 20, 31);
4661 tcg_gen_shli_tl(cpu_dst
, cpu_src1
, simm
& 0x1f);
4662 } else { /* register */
4663 cpu_tmp0
= tcg_temp_new();
4664 tcg_gen_andi_tl(cpu_tmp0
, cpu_src2
, 0x1f);
4665 tcg_gen_shl_tl(cpu_dst
, cpu_src1
, cpu_tmp0
);
4667 gen_store_gpr(dc
, rd
, cpu_dst
);
4669 case 0x26: /* srl */
4670 if (IS_IMM
) { /* immediate */
4671 simm
= GET_FIELDs(insn
, 20, 31);
4672 tcg_gen_shri_tl(cpu_dst
, cpu_src1
, simm
& 0x1f);
4673 } else { /* register */
4674 cpu_tmp0
= tcg_temp_new();
4675 tcg_gen_andi_tl(cpu_tmp0
, cpu_src2
, 0x1f);
4676 tcg_gen_shr_tl(cpu_dst
, cpu_src1
, cpu_tmp0
);
4678 gen_store_gpr(dc
, rd
, cpu_dst
);
4680 case 0x27: /* sra */
4681 if (IS_IMM
) { /* immediate */
4682 simm
= GET_FIELDs(insn
, 20, 31);
4683 tcg_gen_sari_tl(cpu_dst
, cpu_src1
, simm
& 0x1f);
4684 } else { /* register */
4685 cpu_tmp0
= tcg_temp_new();
4686 tcg_gen_andi_tl(cpu_tmp0
, cpu_src2
, 0x1f);
4687 tcg_gen_sar_tl(cpu_dst
, cpu_src1
, cpu_tmp0
);
4689 gen_store_gpr(dc
, rd
, cpu_dst
);
4693 goto illegal_insn
; /* WRASR in decodetree */
4695 goto illegal_insn
; /* WRPR in decodetree */
4696 case 0x33: /* wrtbr, UA2005 wrhpr */
4697 goto illegal_insn
; /* WRTBR, WRHPR in decodetree */
4698 #ifdef TARGET_SPARC64
4699 case 0x2c: /* V9 movcc */
4701 int cc
= GET_FIELD_SP(insn
, 11, 12);
4702 int cond
= GET_FIELD_SP(insn
, 14, 17);
4706 if (insn
& (1 << 18)) {
4708 gen_compare(&cmp
, 0, cond
, dc
);
4709 } else if (cc
== 2) {
4710 gen_compare(&cmp
, 1, cond
, dc
);
4715 gen_fcompare(&cmp
, cc
, cond
);
4718 /* The get_src2 above loaded the normal 13-bit
4719 immediate field, not the 11-bit field we have
4720 in movcc. But it did handle the reg case. */
4722 simm
= GET_FIELD_SPs(insn
, 0, 10);
4723 tcg_gen_movi_tl(cpu_src2
, simm
);
4726 dst
= gen_load_gpr(dc
, rd
);
4727 tcg_gen_movcond_tl(cmp
.cond
, dst
,
4730 gen_store_gpr(dc
, rd
, dst
);
4733 case 0x2d: /* V9 sdivx */
4734 gen_helper_sdivx(cpu_dst
, tcg_env
, cpu_src1
, cpu_src2
);
4735 gen_store_gpr(dc
, rd
, cpu_dst
);
4737 case 0x2e: /* V9 popc */
4738 tcg_gen_ctpop_tl(cpu_dst
, cpu_src2
);
4739 gen_store_gpr(dc
, rd
, cpu_dst
);
4741 case 0x2f: /* V9 movr */
4743 int cond
= GET_FIELD_SP(insn
, 10, 12);
4747 gen_compare_reg(&cmp
, cond
, cpu_src1
);
4749 /* The get_src2 above loaded the normal 13-bit
4750 immediate field, not the 10-bit field we have
4751 in movr. But it did handle the reg case. */
4753 simm
= GET_FIELD_SPs(insn
, 0, 9);
4754 tcg_gen_movi_tl(cpu_src2
, simm
);
4757 dst
= gen_load_gpr(dc
, rd
);
4758 tcg_gen_movcond_tl(cmp
.cond
, dst
,
4761 gen_store_gpr(dc
, rd
, dst
);
4769 } else if (xop
== 0x36) { /* UltraSparc shutdown, VIS, V8 CPop1 */
4770 #ifdef TARGET_SPARC64
4771 int opf
= GET_FIELD_SP(insn
, 5, 13);
4772 rs1
= GET_FIELD(insn
, 13, 17);
4773 rs2
= GET_FIELD(insn
, 27, 31);
4774 if (gen_trap_ifnofpu(dc
)) {
4779 case 0x000: /* VIS I edge8cc */
4780 CHECK_FPU_FEATURE(dc
, VIS1
);
4781 cpu_src1
= gen_load_gpr(dc
, rs1
);
4782 cpu_src2
= gen_load_gpr(dc
, rs2
);
4783 gen_edge(dc
, cpu_dst
, cpu_src1
, cpu_src2
, 8, 1, 0);
4784 gen_store_gpr(dc
, rd
, cpu_dst
);
4786 case 0x001: /* VIS II edge8n */
4787 CHECK_FPU_FEATURE(dc
, VIS2
);
4788 cpu_src1
= gen_load_gpr(dc
, rs1
);
4789 cpu_src2
= gen_load_gpr(dc
, rs2
);
4790 gen_edge(dc
, cpu_dst
, cpu_src1
, cpu_src2
, 8, 0, 0);
4791 gen_store_gpr(dc
, rd
, cpu_dst
);
4793 case 0x002: /* VIS I edge8lcc */
4794 CHECK_FPU_FEATURE(dc
, VIS1
);
4795 cpu_src1
= gen_load_gpr(dc
, rs1
);
4796 cpu_src2
= gen_load_gpr(dc
, rs2
);
4797 gen_edge(dc
, cpu_dst
, cpu_src1
, cpu_src2
, 8, 1, 1);
4798 gen_store_gpr(dc
, rd
, cpu_dst
);
4800 case 0x003: /* VIS II edge8ln */
4801 CHECK_FPU_FEATURE(dc
, VIS2
);
4802 cpu_src1
= gen_load_gpr(dc
, rs1
);
4803 cpu_src2
= gen_load_gpr(dc
, rs2
);
4804 gen_edge(dc
, cpu_dst
, cpu_src1
, cpu_src2
, 8, 0, 1);
4805 gen_store_gpr(dc
, rd
, cpu_dst
);
4807 case 0x004: /* VIS I edge16cc */
4808 CHECK_FPU_FEATURE(dc
, VIS1
);
4809 cpu_src1
= gen_load_gpr(dc
, rs1
);
4810 cpu_src2
= gen_load_gpr(dc
, rs2
);
4811 gen_edge(dc
, cpu_dst
, cpu_src1
, cpu_src2
, 16, 1, 0);
4812 gen_store_gpr(dc
, rd
, cpu_dst
);
4814 case 0x005: /* VIS II edge16n */
4815 CHECK_FPU_FEATURE(dc
, VIS2
);
4816 cpu_src1
= gen_load_gpr(dc
, rs1
);
4817 cpu_src2
= gen_load_gpr(dc
, rs2
);
4818 gen_edge(dc
, cpu_dst
, cpu_src1
, cpu_src2
, 16, 0, 0);
4819 gen_store_gpr(dc
, rd
, cpu_dst
);
4821 case 0x006: /* VIS I edge16lcc */
4822 CHECK_FPU_FEATURE(dc
, VIS1
);
4823 cpu_src1
= gen_load_gpr(dc
, rs1
);
4824 cpu_src2
= gen_load_gpr(dc
, rs2
);
4825 gen_edge(dc
, cpu_dst
, cpu_src1
, cpu_src2
, 16, 1, 1);
4826 gen_store_gpr(dc
, rd
, cpu_dst
);
4828 case 0x007: /* VIS II edge16ln */
4829 CHECK_FPU_FEATURE(dc
, VIS2
);
4830 cpu_src1
= gen_load_gpr(dc
, rs1
);
4831 cpu_src2
= gen_load_gpr(dc
, rs2
);
4832 gen_edge(dc
, cpu_dst
, cpu_src1
, cpu_src2
, 16, 0, 1);
4833 gen_store_gpr(dc
, rd
, cpu_dst
);
4835 case 0x008: /* VIS I edge32cc */
4836 CHECK_FPU_FEATURE(dc
, VIS1
);
4837 cpu_src1
= gen_load_gpr(dc
, rs1
);
4838 cpu_src2
= gen_load_gpr(dc
, rs2
);
4839 gen_edge(dc
, cpu_dst
, cpu_src1
, cpu_src2
, 32, 1, 0);
4840 gen_store_gpr(dc
, rd
, cpu_dst
);
4842 case 0x009: /* VIS II edge32n */
4843 CHECK_FPU_FEATURE(dc
, VIS2
);
4844 cpu_src1
= gen_load_gpr(dc
, rs1
);
4845 cpu_src2
= gen_load_gpr(dc
, rs2
);
4846 gen_edge(dc
, cpu_dst
, cpu_src1
, cpu_src2
, 32, 0, 0);
4847 gen_store_gpr(dc
, rd
, cpu_dst
);
4849 case 0x00a: /* VIS I edge32lcc */
4850 CHECK_FPU_FEATURE(dc
, VIS1
);
4851 cpu_src1
= gen_load_gpr(dc
, rs1
);
4852 cpu_src2
= gen_load_gpr(dc
, rs2
);
4853 gen_edge(dc
, cpu_dst
, cpu_src1
, cpu_src2
, 32, 1, 1);
4854 gen_store_gpr(dc
, rd
, cpu_dst
);
4856 case 0x00b: /* VIS II edge32ln */
4857 CHECK_FPU_FEATURE(dc
, VIS2
);
4858 cpu_src1
= gen_load_gpr(dc
, rs1
);
4859 cpu_src2
= gen_load_gpr(dc
, rs2
);
4860 gen_edge(dc
, cpu_dst
, cpu_src1
, cpu_src2
, 32, 0, 1);
4861 gen_store_gpr(dc
, rd
, cpu_dst
);
4863 case 0x010: /* VIS I array8 */
4864 CHECK_FPU_FEATURE(dc
, VIS1
);
4865 cpu_src1
= gen_load_gpr(dc
, rs1
);
4866 cpu_src2
= gen_load_gpr(dc
, rs2
);
4867 gen_helper_array8(cpu_dst
, cpu_src1
, cpu_src2
);
4868 gen_store_gpr(dc
, rd
, cpu_dst
);
4870 case 0x012: /* VIS I array16 */
4871 CHECK_FPU_FEATURE(dc
, VIS1
);
4872 cpu_src1
= gen_load_gpr(dc
, rs1
);
4873 cpu_src2
= gen_load_gpr(dc
, rs2
);
4874 gen_helper_array8(cpu_dst
, cpu_src1
, cpu_src2
);
4875 tcg_gen_shli_i64(cpu_dst
, cpu_dst
, 1);
4876 gen_store_gpr(dc
, rd
, cpu_dst
);
4878 case 0x014: /* VIS I array32 */
4879 CHECK_FPU_FEATURE(dc
, VIS1
);
4880 cpu_src1
= gen_load_gpr(dc
, rs1
);
4881 cpu_src2
= gen_load_gpr(dc
, rs2
);
4882 gen_helper_array8(cpu_dst
, cpu_src1
, cpu_src2
);
4883 tcg_gen_shli_i64(cpu_dst
, cpu_dst
, 2);
4884 gen_store_gpr(dc
, rd
, cpu_dst
);
4886 case 0x018: /* VIS I alignaddr */
4887 CHECK_FPU_FEATURE(dc
, VIS1
);
4888 cpu_src1
= gen_load_gpr(dc
, rs1
);
4889 cpu_src2
= gen_load_gpr(dc
, rs2
);
4890 gen_alignaddr(cpu_dst
, cpu_src1
, cpu_src2
, 0);
4891 gen_store_gpr(dc
, rd
, cpu_dst
);
4893 case 0x01a: /* VIS I alignaddrl */
4894 CHECK_FPU_FEATURE(dc
, VIS1
);
4895 cpu_src1
= gen_load_gpr(dc
, rs1
);
4896 cpu_src2
= gen_load_gpr(dc
, rs2
);
4897 gen_alignaddr(cpu_dst
, cpu_src1
, cpu_src2
, 1);
4898 gen_store_gpr(dc
, rd
, cpu_dst
);
4900 case 0x019: /* VIS II bmask */
4901 CHECK_FPU_FEATURE(dc
, VIS2
);
4902 cpu_src1
= gen_load_gpr(dc
, rs1
);
4903 cpu_src2
= gen_load_gpr(dc
, rs2
);
4904 tcg_gen_add_tl(cpu_dst
, cpu_src1
, cpu_src2
);
4905 tcg_gen_deposit_tl(cpu_gsr
, cpu_gsr
, cpu_dst
, 32, 32);
4906 gen_store_gpr(dc
, rd
, cpu_dst
);
4908 case 0x020: /* VIS I fcmple16 */
4909 CHECK_FPU_FEATURE(dc
, VIS1
);
4910 cpu_src1_64
= gen_load_fpr_D(dc
, rs1
);
4911 cpu_src2_64
= gen_load_fpr_D(dc
, rs2
);
4912 gen_helper_fcmple16(cpu_dst
, cpu_src1_64
, cpu_src2_64
);
4913 gen_store_gpr(dc
, rd
, cpu_dst
);
4915 case 0x022: /* VIS I fcmpne16 */
4916 CHECK_FPU_FEATURE(dc
, VIS1
);
4917 cpu_src1_64
= gen_load_fpr_D(dc
, rs1
);
4918 cpu_src2_64
= gen_load_fpr_D(dc
, rs2
);
4919 gen_helper_fcmpne16(cpu_dst
, cpu_src1_64
, cpu_src2_64
);
4920 gen_store_gpr(dc
, rd
, cpu_dst
);
4922 case 0x024: /* VIS I fcmple32 */
4923 CHECK_FPU_FEATURE(dc
, VIS1
);
4924 cpu_src1_64
= gen_load_fpr_D(dc
, rs1
);
4925 cpu_src2_64
= gen_load_fpr_D(dc
, rs2
);
4926 gen_helper_fcmple32(cpu_dst
, cpu_src1_64
, cpu_src2_64
);
4927 gen_store_gpr(dc
, rd
, cpu_dst
);
4929 case 0x026: /* VIS I fcmpne32 */
4930 CHECK_FPU_FEATURE(dc
, VIS1
);
4931 cpu_src1_64
= gen_load_fpr_D(dc
, rs1
);
4932 cpu_src2_64
= gen_load_fpr_D(dc
, rs2
);
4933 gen_helper_fcmpne32(cpu_dst
, cpu_src1_64
, cpu_src2_64
);
4934 gen_store_gpr(dc
, rd
, cpu_dst
);
4936 case 0x028: /* VIS I fcmpgt16 */
4937 CHECK_FPU_FEATURE(dc
, VIS1
);
4938 cpu_src1_64
= gen_load_fpr_D(dc
, rs1
);
4939 cpu_src2_64
= gen_load_fpr_D(dc
, rs2
);
4940 gen_helper_fcmpgt16(cpu_dst
, cpu_src1_64
, cpu_src2_64
);
4941 gen_store_gpr(dc
, rd
, cpu_dst
);
4943 case 0x02a: /* VIS I fcmpeq16 */
4944 CHECK_FPU_FEATURE(dc
, VIS1
);
4945 cpu_src1_64
= gen_load_fpr_D(dc
, rs1
);
4946 cpu_src2_64
= gen_load_fpr_D(dc
, rs2
);
4947 gen_helper_fcmpeq16(cpu_dst
, cpu_src1_64
, cpu_src2_64
);
4948 gen_store_gpr(dc
, rd
, cpu_dst
);
4950 case 0x02c: /* VIS I fcmpgt32 */
4951 CHECK_FPU_FEATURE(dc
, VIS1
);
4952 cpu_src1_64
= gen_load_fpr_D(dc
, rs1
);
4953 cpu_src2_64
= gen_load_fpr_D(dc
, rs2
);
4954 gen_helper_fcmpgt32(cpu_dst
, cpu_src1_64
, cpu_src2_64
);
4955 gen_store_gpr(dc
, rd
, cpu_dst
);
4957 case 0x02e: /* VIS I fcmpeq32 */
4958 CHECK_FPU_FEATURE(dc
, VIS1
);
4959 cpu_src1_64
= gen_load_fpr_D(dc
, rs1
);
4960 cpu_src2_64
= gen_load_fpr_D(dc
, rs2
);
4961 gen_helper_fcmpeq32(cpu_dst
, cpu_src1_64
, cpu_src2_64
);
4962 gen_store_gpr(dc
, rd
, cpu_dst
);
4964 case 0x031: /* VIS I fmul8x16 */
4965 CHECK_FPU_FEATURE(dc
, VIS1
);
4966 gen_ne_fop_DDD(dc
, rd
, rs1
, rs2
, gen_helper_fmul8x16
);
4968 case 0x033: /* VIS I fmul8x16au */
4969 CHECK_FPU_FEATURE(dc
, VIS1
);
4970 gen_ne_fop_DDD(dc
, rd
, rs1
, rs2
, gen_helper_fmul8x16au
);
4972 case 0x035: /* VIS I fmul8x16al */
4973 CHECK_FPU_FEATURE(dc
, VIS1
);
4974 gen_ne_fop_DDD(dc
, rd
, rs1
, rs2
, gen_helper_fmul8x16al
);
4976 case 0x036: /* VIS I fmul8sux16 */
4977 CHECK_FPU_FEATURE(dc
, VIS1
);
4978 gen_ne_fop_DDD(dc
, rd
, rs1
, rs2
, gen_helper_fmul8sux16
);
4980 case 0x037: /* VIS I fmul8ulx16 */
4981 CHECK_FPU_FEATURE(dc
, VIS1
);
4982 gen_ne_fop_DDD(dc
, rd
, rs1
, rs2
, gen_helper_fmul8ulx16
);
4984 case 0x038: /* VIS I fmuld8sux16 */
4985 CHECK_FPU_FEATURE(dc
, VIS1
);
4986 gen_ne_fop_DDD(dc
, rd
, rs1
, rs2
, gen_helper_fmuld8sux16
);
4988 case 0x039: /* VIS I fmuld8ulx16 */
4989 CHECK_FPU_FEATURE(dc
, VIS1
);
4990 gen_ne_fop_DDD(dc
, rd
, rs1
, rs2
, gen_helper_fmuld8ulx16
);
4992 case 0x03a: /* VIS I fpack32 */
4993 CHECK_FPU_FEATURE(dc
, VIS1
);
4994 gen_gsr_fop_DDD(dc
, rd
, rs1
, rs2
, gen_helper_fpack32
);
4996 case 0x03b: /* VIS I fpack16 */
4997 CHECK_FPU_FEATURE(dc
, VIS1
);
4998 cpu_src1_64
= gen_load_fpr_D(dc
, rs2
);
4999 cpu_dst_32
= gen_dest_fpr_F(dc
);
5000 gen_helper_fpack16(cpu_dst_32
, cpu_gsr
, cpu_src1_64
);
5001 gen_store_fpr_F(dc
, rd
, cpu_dst_32
);
5003 case 0x03d: /* VIS I fpackfix */
5004 CHECK_FPU_FEATURE(dc
, VIS1
);
5005 cpu_src1_64
= gen_load_fpr_D(dc
, rs2
);
5006 cpu_dst_32
= gen_dest_fpr_F(dc
);
5007 gen_helper_fpackfix(cpu_dst_32
, cpu_gsr
, cpu_src1_64
);
5008 gen_store_fpr_F(dc
, rd
, cpu_dst_32
);
5010 case 0x03e: /* VIS I pdist */
5011 CHECK_FPU_FEATURE(dc
, VIS1
);
5012 gen_ne_fop_DDDD(dc
, rd
, rs1
, rs2
, gen_helper_pdist
);
5014 case 0x048: /* VIS I faligndata */
5015 CHECK_FPU_FEATURE(dc
, VIS1
);
5016 gen_gsr_fop_DDD(dc
, rd
, rs1
, rs2
, gen_faligndata
);
5018 case 0x04b: /* VIS I fpmerge */
5019 CHECK_FPU_FEATURE(dc
, VIS1
);
5020 gen_ne_fop_DDD(dc
, rd
, rs1
, rs2
, gen_helper_fpmerge
);
5022 case 0x04c: /* VIS II bshuffle */
5023 CHECK_FPU_FEATURE(dc
, VIS2
);
5024 gen_gsr_fop_DDD(dc
, rd
, rs1
, rs2
, gen_helper_bshuffle
);
5026 case 0x04d: /* VIS I fexpand */
5027 CHECK_FPU_FEATURE(dc
, VIS1
);
5028 gen_ne_fop_DDD(dc
, rd
, rs1
, rs2
, gen_helper_fexpand
);
5030 case 0x050: /* VIS I fpadd16 */
5031 CHECK_FPU_FEATURE(dc
, VIS1
);
5032 gen_ne_fop_DDD(dc
, rd
, rs1
, rs2
, gen_helper_fpadd16
);
5034 case 0x051: /* VIS I fpadd16s */
5035 CHECK_FPU_FEATURE(dc
, VIS1
);
5036 gen_ne_fop_FFF(dc
, rd
, rs1
, rs2
, gen_helper_fpadd16s
);
5038 case 0x052: /* VIS I fpadd32 */
5039 CHECK_FPU_FEATURE(dc
, VIS1
);
5040 gen_ne_fop_DDD(dc
, rd
, rs1
, rs2
, gen_helper_fpadd32
);
5042 case 0x053: /* VIS I fpadd32s */
5043 CHECK_FPU_FEATURE(dc
, VIS1
);
5044 gen_ne_fop_FFF(dc
, rd
, rs1
, rs2
, tcg_gen_add_i32
);
5046 case 0x054: /* VIS I fpsub16 */
5047 CHECK_FPU_FEATURE(dc
, VIS1
);
5048 gen_ne_fop_DDD(dc
, rd
, rs1
, rs2
, gen_helper_fpsub16
);
5050 case 0x055: /* VIS I fpsub16s */
5051 CHECK_FPU_FEATURE(dc
, VIS1
);
5052 gen_ne_fop_FFF(dc
, rd
, rs1
, rs2
, gen_helper_fpsub16s
);
5054 case 0x056: /* VIS I fpsub32 */
5055 CHECK_FPU_FEATURE(dc
, VIS1
);
5056 gen_ne_fop_DDD(dc
, rd
, rs1
, rs2
, gen_helper_fpsub32
);
5058 case 0x057: /* VIS I fpsub32s */
5059 CHECK_FPU_FEATURE(dc
, VIS1
);
5060 gen_ne_fop_FFF(dc
, rd
, rs1
, rs2
, tcg_gen_sub_i32
);
5062 case 0x060: /* VIS I fzero */
5063 CHECK_FPU_FEATURE(dc
, VIS1
);
5064 cpu_dst_64
= gen_dest_fpr_D(dc
, rd
);
5065 tcg_gen_movi_i64(cpu_dst_64
, 0);
5066 gen_store_fpr_D(dc
, rd
, cpu_dst_64
);
5068 case 0x061: /* VIS I fzeros */
5069 CHECK_FPU_FEATURE(dc
, VIS1
);
5070 cpu_dst_32
= gen_dest_fpr_F(dc
);
5071 tcg_gen_movi_i32(cpu_dst_32
, 0);
5072 gen_store_fpr_F(dc
, rd
, cpu_dst_32
);
5074 case 0x062: /* VIS I fnor */
5075 CHECK_FPU_FEATURE(dc
, VIS1
);
5076 gen_ne_fop_DDD(dc
, rd
, rs1
, rs2
, tcg_gen_nor_i64
);
5078 case 0x063: /* VIS I fnors */
5079 CHECK_FPU_FEATURE(dc
, VIS1
);
5080 gen_ne_fop_FFF(dc
, rd
, rs1
, rs2
, tcg_gen_nor_i32
);
5082 case 0x064: /* VIS I fandnot2 */
5083 CHECK_FPU_FEATURE(dc
, VIS1
);
5084 gen_ne_fop_DDD(dc
, rd
, rs1
, rs2
, tcg_gen_andc_i64
);
5086 case 0x065: /* VIS I fandnot2s */
5087 CHECK_FPU_FEATURE(dc
, VIS1
);
5088 gen_ne_fop_FFF(dc
, rd
, rs1
, rs2
, tcg_gen_andc_i32
);
5090 case 0x066: /* VIS I fnot2 */
5091 CHECK_FPU_FEATURE(dc
, VIS1
);
5092 gen_ne_fop_DD(dc
, rd
, rs2
, tcg_gen_not_i64
);
5094 case 0x067: /* VIS I fnot2s */
5095 CHECK_FPU_FEATURE(dc
, VIS1
);
5096 gen_ne_fop_FF(dc
, rd
, rs2
, tcg_gen_not_i32
);
5098 case 0x068: /* VIS I fandnot1 */
5099 CHECK_FPU_FEATURE(dc
, VIS1
);
5100 gen_ne_fop_DDD(dc
, rd
, rs2
, rs1
, tcg_gen_andc_i64
);
5102 case 0x069: /* VIS I fandnot1s */
5103 CHECK_FPU_FEATURE(dc
, VIS1
);
5104 gen_ne_fop_FFF(dc
, rd
, rs2
, rs1
, tcg_gen_andc_i32
);
5106 case 0x06a: /* VIS I fnot1 */
5107 CHECK_FPU_FEATURE(dc
, VIS1
);
5108 gen_ne_fop_DD(dc
, rd
, rs1
, tcg_gen_not_i64
);
5110 case 0x06b: /* VIS I fnot1s */
5111 CHECK_FPU_FEATURE(dc
, VIS1
);
5112 gen_ne_fop_FF(dc
, rd
, rs1
, tcg_gen_not_i32
);
5114 case 0x06c: /* VIS I fxor */
5115 CHECK_FPU_FEATURE(dc
, VIS1
);
5116 gen_ne_fop_DDD(dc
, rd
, rs1
, rs2
, tcg_gen_xor_i64
);
5118 case 0x06d: /* VIS I fxors */
5119 CHECK_FPU_FEATURE(dc
, VIS1
);
5120 gen_ne_fop_FFF(dc
, rd
, rs1
, rs2
, tcg_gen_xor_i32
);
5122 case 0x06e: /* VIS I fnand */
5123 CHECK_FPU_FEATURE(dc
, VIS1
);
5124 gen_ne_fop_DDD(dc
, rd
, rs1
, rs2
, tcg_gen_nand_i64
);
5126 case 0x06f: /* VIS I fnands */
5127 CHECK_FPU_FEATURE(dc
, VIS1
);
5128 gen_ne_fop_FFF(dc
, rd
, rs1
, rs2
, tcg_gen_nand_i32
);
5130 case 0x070: /* VIS I fand */
5131 CHECK_FPU_FEATURE(dc
, VIS1
);
5132 gen_ne_fop_DDD(dc
, rd
, rs1
, rs2
, tcg_gen_and_i64
);
5134 case 0x071: /* VIS I fands */
5135 CHECK_FPU_FEATURE(dc
, VIS1
);
5136 gen_ne_fop_FFF(dc
, rd
, rs1
, rs2
, tcg_gen_and_i32
);
5138 case 0x072: /* VIS I fxnor */
5139 CHECK_FPU_FEATURE(dc
, VIS1
);
5140 gen_ne_fop_DDD(dc
, rd
, rs1
, rs2
, tcg_gen_eqv_i64
);
5142 case 0x073: /* VIS I fxnors */
5143 CHECK_FPU_FEATURE(dc
, VIS1
);
5144 gen_ne_fop_FFF(dc
, rd
, rs1
, rs2
, tcg_gen_eqv_i32
);
5146 case 0x074: /* VIS I fsrc1 */
5147 CHECK_FPU_FEATURE(dc
, VIS1
);
5148 cpu_src1_64
= gen_load_fpr_D(dc
, rs1
);
5149 gen_store_fpr_D(dc
, rd
, cpu_src1_64
);
5151 case 0x075: /* VIS I fsrc1s */
5152 CHECK_FPU_FEATURE(dc
, VIS1
);
5153 cpu_src1_32
= gen_load_fpr_F(dc
, rs1
);
5154 gen_store_fpr_F(dc
, rd
, cpu_src1_32
);
5156 case 0x076: /* VIS I fornot2 */
5157 CHECK_FPU_FEATURE(dc
, VIS1
);
5158 gen_ne_fop_DDD(dc
, rd
, rs1
, rs2
, tcg_gen_orc_i64
);
5160 case 0x077: /* VIS I fornot2s */
5161 CHECK_FPU_FEATURE(dc
, VIS1
);
5162 gen_ne_fop_FFF(dc
, rd
, rs1
, rs2
, tcg_gen_orc_i32
);
5164 case 0x078: /* VIS I fsrc2 */
5165 CHECK_FPU_FEATURE(dc
, VIS1
);
5166 cpu_src1_64
= gen_load_fpr_D(dc
, rs2
);
5167 gen_store_fpr_D(dc
, rd
, cpu_src1_64
);
5169 case 0x079: /* VIS I fsrc2s */
5170 CHECK_FPU_FEATURE(dc
, VIS1
);
5171 cpu_src1_32
= gen_load_fpr_F(dc
, rs2
);
5172 gen_store_fpr_F(dc
, rd
, cpu_src1_32
);
5174 case 0x07a: /* VIS I fornot1 */
5175 CHECK_FPU_FEATURE(dc
, VIS1
);
5176 gen_ne_fop_DDD(dc
, rd
, rs2
, rs1
, tcg_gen_orc_i64
);
5178 case 0x07b: /* VIS I fornot1s */
5179 CHECK_FPU_FEATURE(dc
, VIS1
);
5180 gen_ne_fop_FFF(dc
, rd
, rs2
, rs1
, tcg_gen_orc_i32
);
5182 case 0x07c: /* VIS I for */
5183 CHECK_FPU_FEATURE(dc
, VIS1
);
5184 gen_ne_fop_DDD(dc
, rd
, rs1
, rs2
, tcg_gen_or_i64
);
5186 case 0x07d: /* VIS I fors */
5187 CHECK_FPU_FEATURE(dc
, VIS1
);
5188 gen_ne_fop_FFF(dc
, rd
, rs1
, rs2
, tcg_gen_or_i32
);
5190 case 0x07e: /* VIS I fone */
5191 CHECK_FPU_FEATURE(dc
, VIS1
);
5192 cpu_dst_64
= gen_dest_fpr_D(dc
, rd
);
5193 tcg_gen_movi_i64(cpu_dst_64
, -1);
5194 gen_store_fpr_D(dc
, rd
, cpu_dst_64
);
5196 case 0x07f: /* VIS I fones */
5197 CHECK_FPU_FEATURE(dc
, VIS1
);
5198 cpu_dst_32
= gen_dest_fpr_F(dc
);
5199 tcg_gen_movi_i32(cpu_dst_32
, -1);
5200 gen_store_fpr_F(dc
, rd
, cpu_dst_32
);
5202 case 0x080: /* VIS I shutdown */
5203 case 0x081: /* VIS II siam */
5212 } else if (xop
== 0x37) { /* V8 CPop2, V9 impdep2 */
5213 #ifdef TARGET_SPARC64
5218 #ifdef TARGET_SPARC64
5219 } else if (xop
== 0x39) { /* V9 return */
5221 cpu_src1
= get_src1(dc
, insn
);
5222 cpu_tmp0
= tcg_temp_new();
5223 if (IS_IMM
) { /* immediate */
5224 simm
= GET_FIELDs(insn
, 19, 31);
5225 tcg_gen_addi_tl(cpu_tmp0
, cpu_src1
, simm
);
5226 } else { /* register */
5227 rs2
= GET_FIELD(insn
, 27, 31);
5229 cpu_src2
= gen_load_gpr(dc
, rs2
);
5230 tcg_gen_add_tl(cpu_tmp0
, cpu_src1
, cpu_src2
);
5232 tcg_gen_mov_tl(cpu_tmp0
, cpu_src1
);
5235 gen_check_align(dc
, cpu_tmp0
, 3);
5236 gen_helper_restore(tcg_env
);
5238 tcg_gen_mov_tl(cpu_npc
, cpu_tmp0
);
5239 dc
->npc
= DYNAMIC_PC_LOOKUP
;
5243 cpu_src1
= get_src1(dc
, insn
);
5244 cpu_tmp0
= tcg_temp_new();
5245 if (IS_IMM
) { /* immediate */
5246 simm
= GET_FIELDs(insn
, 19, 31);
5247 tcg_gen_addi_tl(cpu_tmp0
, cpu_src1
, simm
);
5248 } else { /* register */
5249 rs2
= GET_FIELD(insn
, 27, 31);
5251 cpu_src2
= gen_load_gpr(dc
, rs2
);
5252 tcg_gen_add_tl(cpu_tmp0
, cpu_src1
, cpu_src2
);
5254 tcg_gen_mov_tl(cpu_tmp0
, cpu_src1
);
5258 case 0x38: /* jmpl */
5260 gen_check_align(dc
, cpu_tmp0
, 3);
5261 gen_store_gpr(dc
, rd
, tcg_constant_tl(dc
->pc
));
5263 gen_address_mask(dc
, cpu_tmp0
);
5264 tcg_gen_mov_tl(cpu_npc
, cpu_tmp0
);
5265 dc
->npc
= DYNAMIC_PC_LOOKUP
;
5268 #if !defined(CONFIG_USER_ONLY) && !defined(TARGET_SPARC64)
5269 case 0x39: /* rett, V9 return */
5271 if (!supervisor(dc
))
5273 gen_check_align(dc
, cpu_tmp0
, 3);
5275 tcg_gen_mov_tl(cpu_npc
, cpu_tmp0
);
5276 dc
->npc
= DYNAMIC_PC
;
5277 gen_helper_rett(tcg_env
);
5281 case 0x3b: /* flush */
5284 case 0x3c: /* save */
5285 gen_helper_save(tcg_env
);
5286 gen_store_gpr(dc
, rd
, cpu_tmp0
);
5288 case 0x3d: /* restore */
5289 gen_helper_restore(tcg_env
);
5290 gen_store_gpr(dc
, rd
, cpu_tmp0
);
5292 #if !defined(CONFIG_USER_ONLY) && defined(TARGET_SPARC64)
5293 case 0x3e: /* V9 done/retry */
5297 if (!supervisor(dc
))
5299 dc
->npc
= DYNAMIC_PC
;
5300 dc
->pc
= DYNAMIC_PC
;
5301 translator_io_start(&dc
->base
);
5302 gen_helper_done(tcg_env
);
5305 if (!supervisor(dc
))
5307 dc
->npc
= DYNAMIC_PC
;
5308 dc
->pc
= DYNAMIC_PC
;
5309 translator_io_start(&dc
->base
);
5310 gen_helper_retry(tcg_env
);
5325 case 3: /* load/store instructions */
5327 unsigned int xop
= GET_FIELD(insn
, 7, 12);
5328 /* ??? gen_address_mask prevents us from using a source
5329 register directly. Always generate a temporary. */
5330 TCGv cpu_addr
= tcg_temp_new();
5332 tcg_gen_mov_tl(cpu_addr
, get_src1(dc
, insn
));
5333 if (xop
== 0x3c || xop
== 0x3e) {
5334 /* V9 casa/casxa : no offset */
5335 } else if (IS_IMM
) { /* immediate */
5336 simm
= GET_FIELDs(insn
, 19, 31);
5338 tcg_gen_addi_tl(cpu_addr
, cpu_addr
, simm
);
5340 } else { /* register */
5341 rs2
= GET_FIELD(insn
, 27, 31);
5343 tcg_gen_add_tl(cpu_addr
, cpu_addr
, gen_load_gpr(dc
, rs2
));
5346 if (xop
< 4 || (xop
> 7 && xop
< 0x14 && xop
!= 0x0e) ||
5347 (xop
> 0x17 && xop
<= 0x1d ) ||
5348 (xop
> 0x2c && xop
<= 0x33) || xop
== 0x1f || xop
== 0x3d) {
5349 TCGv cpu_val
= gen_dest_gpr(dc
, rd
);
5352 case 0x0: /* ld, V9 lduw, load unsigned word */
5353 gen_address_mask(dc
, cpu_addr
);
5354 tcg_gen_qemu_ld_tl(cpu_val
, cpu_addr
,
5355 dc
->mem_idx
, MO_TEUL
| MO_ALIGN
);
5357 case 0x1: /* ldub, load unsigned byte */
5358 gen_address_mask(dc
, cpu_addr
);
5359 tcg_gen_qemu_ld_tl(cpu_val
, cpu_addr
,
5360 dc
->mem_idx
, MO_UB
);
5362 case 0x2: /* lduh, load unsigned halfword */
5363 gen_address_mask(dc
, cpu_addr
);
5364 tcg_gen_qemu_ld_tl(cpu_val
, cpu_addr
,
5365 dc
->mem_idx
, MO_TEUW
| MO_ALIGN
);
5367 case 0x3: /* ldd, load double word */
5373 gen_address_mask(dc
, cpu_addr
);
5374 t64
= tcg_temp_new_i64();
5375 tcg_gen_qemu_ld_i64(t64
, cpu_addr
,
5376 dc
->mem_idx
, MO_TEUQ
| MO_ALIGN
);
5377 tcg_gen_trunc_i64_tl(cpu_val
, t64
);
5378 tcg_gen_ext32u_tl(cpu_val
, cpu_val
);
5379 gen_store_gpr(dc
, rd
+ 1, cpu_val
);
5380 tcg_gen_shri_i64(t64
, t64
, 32);
5381 tcg_gen_trunc_i64_tl(cpu_val
, t64
);
5382 tcg_gen_ext32u_tl(cpu_val
, cpu_val
);
5385 case 0x9: /* ldsb, load signed byte */
5386 gen_address_mask(dc
, cpu_addr
);
5387 tcg_gen_qemu_ld_tl(cpu_val
, cpu_addr
, dc
->mem_idx
, MO_SB
);
5389 case 0xa: /* ldsh, load signed halfword */
5390 gen_address_mask(dc
, cpu_addr
);
5391 tcg_gen_qemu_ld_tl(cpu_val
, cpu_addr
,
5392 dc
->mem_idx
, MO_TESW
| MO_ALIGN
);
5394 case 0xd: /* ldstub */
5395 gen_ldstub(dc
, cpu_val
, cpu_addr
, dc
->mem_idx
);
5398 /* swap, swap register with memory. Also atomically */
5399 cpu_src1
= gen_load_gpr(dc
, rd
);
5400 gen_swap(dc
, cpu_val
, cpu_src1
, cpu_addr
,
5401 dc
->mem_idx
, MO_TEUL
);
5403 #if !defined(CONFIG_USER_ONLY) || defined(TARGET_SPARC64)
5404 case 0x10: /* lda, V9 lduwa, load word alternate */
5405 gen_ld_asi(dc
, cpu_val
, cpu_addr
, insn
, MO_TEUL
);
5407 case 0x11: /* lduba, load unsigned byte alternate */
5408 gen_ld_asi(dc
, cpu_val
, cpu_addr
, insn
, MO_UB
);
5410 case 0x12: /* lduha, load unsigned halfword alternate */
5411 gen_ld_asi(dc
, cpu_val
, cpu_addr
, insn
, MO_TEUW
);
5413 case 0x13: /* ldda, load double word alternate */
5417 gen_ldda_asi(dc
, cpu_addr
, insn
, rd
);
5419 case 0x19: /* ldsba, load signed byte alternate */
5420 gen_ld_asi(dc
, cpu_val
, cpu_addr
, insn
, MO_SB
);
5422 case 0x1a: /* ldsha, load signed halfword alternate */
5423 gen_ld_asi(dc
, cpu_val
, cpu_addr
, insn
, MO_TESW
);
5425 case 0x1d: /* ldstuba -- XXX: should be atomically */
5426 gen_ldstub_asi(dc
, cpu_val
, cpu_addr
, insn
);
5428 case 0x1f: /* swapa, swap reg with alt. memory. Also
5430 cpu_src1
= gen_load_gpr(dc
, rd
);
5431 gen_swap_asi(dc
, cpu_val
, cpu_src1
, cpu_addr
, insn
);
5434 #ifndef TARGET_SPARC64
5435 case 0x30: /* ldc */
5436 case 0x31: /* ldcsr */
5437 case 0x33: /* lddc */
5441 #ifdef TARGET_SPARC64
5442 case 0x08: /* V9 ldsw */
5443 gen_address_mask(dc
, cpu_addr
);
5444 tcg_gen_qemu_ld_tl(cpu_val
, cpu_addr
,
5445 dc
->mem_idx
, MO_TESL
| MO_ALIGN
);
5447 case 0x0b: /* V9 ldx */
5448 gen_address_mask(dc
, cpu_addr
);
5449 tcg_gen_qemu_ld_tl(cpu_val
, cpu_addr
,
5450 dc
->mem_idx
, MO_TEUQ
| MO_ALIGN
);
5452 case 0x18: /* V9 ldswa */
5453 gen_ld_asi(dc
, cpu_val
, cpu_addr
, insn
, MO_TESL
);
5455 case 0x1b: /* V9 ldxa */
5456 gen_ld_asi(dc
, cpu_val
, cpu_addr
, insn
, MO_TEUQ
);
5458 case 0x2d: /* V9 prefetch, no effect */
5460 case 0x30: /* V9 ldfa */
5461 if (gen_trap_ifnofpu(dc
)) {
5464 gen_ldf_asi(dc
, cpu_addr
, insn
, 4, rd
);
5465 gen_update_fprs_dirty(dc
, rd
);
5467 case 0x33: /* V9 lddfa */
5468 if (gen_trap_ifnofpu(dc
)) {
5471 gen_ldf_asi(dc
, cpu_addr
, insn
, 8, DFPREG(rd
));
5472 gen_update_fprs_dirty(dc
, DFPREG(rd
));
5474 case 0x3d: /* V9 prefetcha, no effect */
5476 case 0x32: /* V9 ldqfa */
5477 CHECK_FPU_FEATURE(dc
, FLOAT128
);
5478 if (gen_trap_ifnofpu(dc
)) {
5481 gen_ldf_asi(dc
, cpu_addr
, insn
, 16, QFPREG(rd
));
5482 gen_update_fprs_dirty(dc
, QFPREG(rd
));
5488 gen_store_gpr(dc
, rd
, cpu_val
);
5489 #if !defined(CONFIG_USER_ONLY) || defined(TARGET_SPARC64)
5492 } else if (xop
>= 0x20 && xop
< 0x24) {
5493 if (gen_trap_ifnofpu(dc
)) {
5497 case 0x20: /* ldf, load fpreg */
5498 gen_address_mask(dc
, cpu_addr
);
5499 cpu_dst_32
= gen_dest_fpr_F(dc
);
5500 tcg_gen_qemu_ld_i32(cpu_dst_32
, cpu_addr
,
5501 dc
->mem_idx
, MO_TEUL
| MO_ALIGN
);
5502 gen_store_fpr_F(dc
, rd
, cpu_dst_32
);
5504 case 0x21: /* ldfsr, V9 ldxfsr */
5505 #ifdef TARGET_SPARC64
5506 gen_address_mask(dc
, cpu_addr
);
5508 TCGv_i64 t64
= tcg_temp_new_i64();
5509 tcg_gen_qemu_ld_i64(t64
, cpu_addr
,
5510 dc
->mem_idx
, MO_TEUQ
| MO_ALIGN
);
5511 gen_helper_ldxfsr(cpu_fsr
, tcg_env
, cpu_fsr
, t64
);
5515 cpu_dst_32
= tcg_temp_new_i32();
5516 tcg_gen_qemu_ld_i32(cpu_dst_32
, cpu_addr
,
5517 dc
->mem_idx
, MO_TEUL
| MO_ALIGN
);
5518 gen_helper_ldfsr(cpu_fsr
, tcg_env
, cpu_fsr
, cpu_dst_32
);
5520 case 0x22: /* ldqf, load quad fpreg */
5521 CHECK_FPU_FEATURE(dc
, FLOAT128
);
5522 gen_address_mask(dc
, cpu_addr
);
5523 cpu_src1_64
= tcg_temp_new_i64();
5524 tcg_gen_qemu_ld_i64(cpu_src1_64
, cpu_addr
, dc
->mem_idx
,
5525 MO_TEUQ
| MO_ALIGN_4
);
5526 tcg_gen_addi_tl(cpu_addr
, cpu_addr
, 8);
5527 cpu_src2_64
= tcg_temp_new_i64();
5528 tcg_gen_qemu_ld_i64(cpu_src2_64
, cpu_addr
, dc
->mem_idx
,
5529 MO_TEUQ
| MO_ALIGN_4
);
5530 gen_store_fpr_Q(dc
, rd
, cpu_src1_64
, cpu_src2_64
);
5532 case 0x23: /* lddf, load double fpreg */
5533 gen_address_mask(dc
, cpu_addr
);
5534 cpu_dst_64
= gen_dest_fpr_D(dc
, rd
);
5535 tcg_gen_qemu_ld_i64(cpu_dst_64
, cpu_addr
, dc
->mem_idx
,
5536 MO_TEUQ
| MO_ALIGN_4
);
5537 gen_store_fpr_D(dc
, rd
, cpu_dst_64
);
5542 } else if (xop
< 8 || (xop
>= 0x14 && xop
< 0x18) ||
5543 xop
== 0xe || xop
== 0x1e) {
5544 TCGv cpu_val
= gen_load_gpr(dc
, rd
);
5547 case 0x4: /* st, store word */
5548 gen_address_mask(dc
, cpu_addr
);
5549 tcg_gen_qemu_st_tl(cpu_val
, cpu_addr
,
5550 dc
->mem_idx
, MO_TEUL
| MO_ALIGN
);
5552 case 0x5: /* stb, store byte */
5553 gen_address_mask(dc
, cpu_addr
);
5554 tcg_gen_qemu_st_tl(cpu_val
, cpu_addr
, dc
->mem_idx
, MO_UB
);
5556 case 0x6: /* sth, store halfword */
5557 gen_address_mask(dc
, cpu_addr
);
5558 tcg_gen_qemu_st_tl(cpu_val
, cpu_addr
,
5559 dc
->mem_idx
, MO_TEUW
| MO_ALIGN
);
5561 case 0x7: /* std, store double word */
5568 gen_address_mask(dc
, cpu_addr
);
5569 lo
= gen_load_gpr(dc
, rd
+ 1);
5570 t64
= tcg_temp_new_i64();
5571 tcg_gen_concat_tl_i64(t64
, lo
, cpu_val
);
5572 tcg_gen_qemu_st_i64(t64
, cpu_addr
,
5573 dc
->mem_idx
, MO_TEUQ
| MO_ALIGN
);
5576 #if !defined(CONFIG_USER_ONLY) || defined(TARGET_SPARC64)
5577 case 0x14: /* sta, V9 stwa, store word alternate */
5578 gen_st_asi(dc
, cpu_val
, cpu_addr
, insn
, MO_TEUL
);
5580 case 0x15: /* stba, store byte alternate */
5581 gen_st_asi(dc
, cpu_val
, cpu_addr
, insn
, MO_UB
);
5583 case 0x16: /* stha, store halfword alternate */
5584 gen_st_asi(dc
, cpu_val
, cpu_addr
, insn
, MO_TEUW
);
5586 case 0x17: /* stda, store double word alternate */
5590 gen_stda_asi(dc
, cpu_val
, cpu_addr
, insn
, rd
);
5593 #ifdef TARGET_SPARC64
5594 case 0x0e: /* V9 stx */
5595 gen_address_mask(dc
, cpu_addr
);
5596 tcg_gen_qemu_st_tl(cpu_val
, cpu_addr
,
5597 dc
->mem_idx
, MO_TEUQ
| MO_ALIGN
);
5599 case 0x1e: /* V9 stxa */
5600 gen_st_asi(dc
, cpu_val
, cpu_addr
, insn
, MO_TEUQ
);
5606 } else if (xop
> 0x23 && xop
< 0x28) {
5607 if (gen_trap_ifnofpu(dc
)) {
5611 case 0x24: /* stf, store fpreg */
5612 gen_address_mask(dc
, cpu_addr
);
5613 cpu_src1_32
= gen_load_fpr_F(dc
, rd
);
5614 tcg_gen_qemu_st_i32(cpu_src1_32
, cpu_addr
,
5615 dc
->mem_idx
, MO_TEUL
| MO_ALIGN
);
5617 case 0x25: /* stfsr, V9 stxfsr */
5619 #ifdef TARGET_SPARC64
5620 gen_address_mask(dc
, cpu_addr
);
5622 tcg_gen_qemu_st_tl(cpu_fsr
, cpu_addr
,
5623 dc
->mem_idx
, MO_TEUQ
| MO_ALIGN
);
5627 tcg_gen_qemu_st_tl(cpu_fsr
, cpu_addr
,
5628 dc
->mem_idx
, MO_TEUL
| MO_ALIGN
);
5632 #ifdef TARGET_SPARC64
5633 /* V9 stqf, store quad fpreg */
5634 CHECK_FPU_FEATURE(dc
, FLOAT128
);
5635 gen_address_mask(dc
, cpu_addr
);
5636 /* ??? While stqf only requires 4-byte alignment, it is
5637 legal for the cpu to signal the unaligned exception.
5638 The OS trap handler is then required to fix it up.
5639 For qemu, this avoids having to probe the second page
5640 before performing the first write. */
5641 cpu_src1_64
= gen_load_fpr_Q0(dc
, rd
);
5642 tcg_gen_qemu_st_i64(cpu_src1_64
, cpu_addr
,
5643 dc
->mem_idx
, MO_TEUQ
| MO_ALIGN_16
);
5644 tcg_gen_addi_tl(cpu_addr
, cpu_addr
, 8);
5645 cpu_src2_64
= gen_load_fpr_Q1(dc
, rd
);
5646 tcg_gen_qemu_st_i64(cpu_src1_64
, cpu_addr
,
5647 dc
->mem_idx
, MO_TEUQ
);
5649 #else /* !TARGET_SPARC64 */
5650 /* stdfq, store floating point queue */
5651 #if defined(CONFIG_USER_ONLY)
5654 if (!supervisor(dc
))
5656 if (gen_trap_ifnofpu(dc
)) {
5662 case 0x27: /* stdf, store double fpreg */
5663 gen_address_mask(dc
, cpu_addr
);
5664 cpu_src1_64
= gen_load_fpr_D(dc
, rd
);
5665 tcg_gen_qemu_st_i64(cpu_src1_64
, cpu_addr
, dc
->mem_idx
,
5666 MO_TEUQ
| MO_ALIGN_4
);
5671 } else if (xop
> 0x33 && xop
< 0x3f) {
5673 #ifdef TARGET_SPARC64
5674 case 0x34: /* V9 stfa */
5675 if (gen_trap_ifnofpu(dc
)) {
5678 gen_stf_asi(dc
, cpu_addr
, insn
, 4, rd
);
5680 case 0x36: /* V9 stqfa */
5682 CHECK_FPU_FEATURE(dc
, FLOAT128
);
5683 if (gen_trap_ifnofpu(dc
)) {
5686 gen_stf_asi(dc
, cpu_addr
, insn
, 16, QFPREG(rd
));
5689 case 0x37: /* V9 stdfa */
5690 if (gen_trap_ifnofpu(dc
)) {
5693 gen_stf_asi(dc
, cpu_addr
, insn
, 8, DFPREG(rd
));
5695 case 0x3e: /* V9 casxa */
5696 rs2
= GET_FIELD(insn
, 27, 31);
5697 cpu_src2
= gen_load_gpr(dc
, rs2
);
5698 gen_casx_asi(dc
, cpu_addr
, cpu_src2
, insn
, rd
);
5701 case 0x34: /* stc */
5702 case 0x35: /* stcsr */
5703 case 0x36: /* stdcq */
5704 case 0x37: /* stdc */
5707 #if !defined(CONFIG_USER_ONLY) || defined(TARGET_SPARC64)
5708 case 0x3c: /* V9 or LEON3 casa */
5709 #ifndef TARGET_SPARC64
5710 CHECK_IU_FEATURE(dc
, CASA
);
5712 rs2
= GET_FIELD(insn
, 27, 31);
5713 cpu_src2
= gen_load_gpr(dc
, rs2
);
5714 gen_cas_asi(dc
, cpu_addr
, cpu_src2
, insn
, rd
);
5730 gen_exception(dc
, TT_ILL_INSN
);
5732 #if !defined(CONFIG_USER_ONLY)
5734 gen_exception(dc
, TT_PRIV_INSN
);
5738 gen_op_fpexception_im(dc
, FSR_FTT_UNIMPFPOP
);
5740 #if !defined(CONFIG_USER_ONLY) && !defined(TARGET_SPARC64)
5742 gen_op_fpexception_im(dc
, FSR_FTT_SEQ_ERROR
);
5745 #ifndef TARGET_SPARC64
5747 gen_exception(dc
, TT_NCP_INSN
);
/*
 * TranslatorOps init hook: populate the per-TB DisasContext from the
 * CPU state and the TB flags before any instruction is translated.
 *
 * NOTE(review): reassembled from a mangled extraction; elided structural
 * lines (braces, declarations, fprs_dirty reset) restored from context —
 * verify against upstream before merging.
 */
static void sparc_tr_init_disas_context(DisasContextBase *dcbase, CPUState *cs)
{
    DisasContext *dc = container_of(dcbase, DisasContext, base);
    CPUSPARCState *env = cpu_env(cs);
    int bound;

    dc->pc = dc->base.pc_first;
    /* NPC is carried in cs_base so conditional-delay-slot state survives. */
    dc->npc = (target_ulong)dc->base.tb->cs_base;
    dc->cc_op = CC_OP_DYNAMIC;
    dc->mem_idx = dc->base.tb->flags & TB_FLAG_MMU_MASK;
    dc->def = &env->def;
    dc->fpu_enabled = tb_fpu_enabled(dc->base.tb->flags);
    dc->address_mask_32bit = tb_am_enabled(dc->base.tb->flags);
#ifndef CONFIG_USER_ONLY
    dc->supervisor = (dc->base.tb->flags & TB_FLAG_SUPER) != 0;
#endif
#ifdef TARGET_SPARC64
    dc->fprs_dirty = 0;
    dc->asi = (dc->base.tb->flags >> TB_FLAG_ASI_SHIFT) & 0xff;
#ifndef CONFIG_USER_ONLY
    dc->hypervisor = (dc->base.tb->flags & TB_FLAG_HYPER) != 0;
#endif
#endif
    /*
     * if we reach a page boundary, we stop generation so that the
     * PC of a TT_TFAULT exception is always in the right page
     */
    bound = -(dc->base.pc_first | TARGET_PAGE_MASK) / 4;
    dc->base.max_insns = MIN(dc->base.max_insns, bound);
}
/* TranslatorOps tb_start hook: nothing to do for SPARC. */
static void sparc_tr_tb_start(DisasContextBase *db, CPUState *cs)
{
}
/*
 * TranslatorOps insn_start hook: record (pc, npc) for this insn so the
 * state can be reconstructed on an exception (see
 * sparc_restore_state_to_opc).  NPC may be a real address or one of the
 * low-bit tag values (JUMP_PC / DYNAMIC_PC / DYNAMIC_PC_LOOKUP).
 *
 * NOTE(review): reassembled from a mangled extraction; elided braces and
 * case labels restored from context — verify against upstream.
 */
static void sparc_tr_insn_start(DisasContextBase *dcbase, CPUState *cs)
{
    DisasContext *dc = container_of(dcbase, DisasContext, base);
    target_ulong npc = dc->npc;

    if (npc & 3) {
        switch (npc) {
        case JUMP_PC:
            /* The not-taken target must be the fallthrough address. */
            assert(dc->jump_pc[1] == dc->pc + 4);
            /* Encode the taken target with the JUMP_PC tag bit. */
            npc = dc->jump_pc[0] | JUMP_PC;
            break;
        case DYNAMIC_PC:
        case DYNAMIC_PC_LOOKUP:
            npc = DYNAMIC_PC;
            break;
        default:
            g_assert_not_reached();
        }
    }
    tcg_gen_insn_start(dc->pc, npc);
}
/*
 * TranslatorOps translate_insn hook: fetch one 32-bit instruction,
 * dispatch to the decodetree decoder, falling back to the legacy
 * hand-written decoder for opcodes not yet converted.
 *
 * NOTE(review): reassembled from a mangled extraction; elided braces and
 * the insn declaration restored from context — verify against upstream.
 */
static void sparc_tr_translate_insn(DisasContextBase *dcbase, CPUState *cs)
{
    DisasContext *dc = container_of(dcbase, DisasContext, base);
    CPUSPARCState *env = cpu_env(cs);
    unsigned int insn;

    insn = translator_ldl(env, &dc->base, dc->pc);
    dc->base.pc_next += 4;

    /* decodetree first; legacy decoder handles what it rejects. */
    if (!decode(dc, insn)) {
        disas_sparc_legacy(dc, insn);
    }

    if (dc->base.is_jmp == DISAS_NORETURN) {
        return;
    }
    /* A branch left pc out of step with pc_next: end the TB here. */
    if (dc->pc != dc->base.pc_next) {
        dc->base.is_jmp = DISAS_TOO_MANY;
    }
}
/*
 * TranslatorOps tb_stop hook: emit the TB epilogue.  Chooses between
 * direct chaining (both pc and npc static), a TB lookup, or a plain
 * exit, based on the low-bit tags in dc->pc/dc->npc.  Finally emits the
 * out-of-line code for any delayed exceptions queued during translation.
 *
 * NOTE(review): reassembled from a mangled extraction; elided case
 * labels, may_lookup plumbing, DISAS_EXIT arm and the list-free tail
 * restored from context — verify against upstream before relying on it.
 */
static void sparc_tr_tb_stop(DisasContextBase *dcbase, CPUState *cs)
{
    DisasContext *dc = container_of(dcbase, DisasContext, base);
    DisasDelayException *e, *e_next;
    bool may_lookup;

    switch (dc->base.is_jmp) {
    case DISAS_NEXT:
    case DISAS_TOO_MANY:
        if (((dc->pc | dc->npc) & 3) == 0) {
            /* static PC and NPC: we can use direct chaining */
            gen_goto_tb(dc, 0, dc->pc, dc->npc);
            break;
        }

        /* Assume a lookup is possible until a DYNAMIC_PC tag says no. */
        may_lookup = true;
        if (dc->pc & 3) {
            switch (dc->pc) {
            case DYNAMIC_PC_LOOKUP:
                break;
            case DYNAMIC_PC:
                may_lookup = false;
                break;
            default:
                g_assert_not_reached();
            }
        } else {
            tcg_gen_movi_tl(cpu_pc, dc->pc);
        }

        if (dc->npc & 3) {
            switch (dc->npc) {
            case JUMP_PC:
                /* Resolve the pending conditional branch at runtime. */
                gen_generic_branch(dc);
                break;
            case DYNAMIC_PC:
                may_lookup = false;
                break;
            case DYNAMIC_PC_LOOKUP:
                break;
            default:
                g_assert_not_reached();
            }
        } else {
            tcg_gen_movi_tl(cpu_npc, dc->npc);
        }
        if (may_lookup) {
            tcg_gen_lookup_and_goto_ptr();
        } else {
            tcg_gen_exit_tb(NULL, 0);
        }
        break;

    case DISAS_NORETURN:
        break;

    case DISAS_EXIT:
        /* Exit TB */
        save_state(dc);
        tcg_gen_exit_tb(NULL, 0);
        break;

    default:
        g_assert_not_reached();
    }

    /* Emit landing pads for exceptions raised from delay slots. */
    for (e = dc->delay_excp_list; e; e = e_next) {
        gen_set_label(e->lab);

        tcg_gen_movi_tl(cpu_pc, e->pc);
        /* An odd npc encodes "already stored dynamically"; skip it. */
        if (e->npc % 4 == 0) {
            tcg_gen_movi_tl(cpu_npc, e->npc);
        }
        gen_helper_raise_exception(tcg_env, e->excp);

        e_next = e->next;
        g_free(e);
    }
}
/*
 * TranslatorOps disas_log hook: print the guest symbol name and
 * disassembly of the translated block to the QEMU log.
 */
static void sparc_tr_disas_log(const DisasContextBase *dcbase,
                               CPUState *cpu, FILE *logfile)
{
    fprintf(logfile, "IN: %s\n", lookup_symbol(dcbase->pc_first));
    target_disas(logfile, cpu, dcbase->pc_first, dcbase->tb->size);
}
/* Hook table handed to the generic translator loop. */
static const TranslatorOps sparc_tr_ops = {
    .init_disas_context = sparc_tr_init_disas_context,
    .tb_start           = sparc_tr_tb_start,
    .insn_start         = sparc_tr_insn_start,
    .translate_insn     = sparc_tr_translate_insn,
    .tb_stop            = sparc_tr_tb_stop,
    .disas_log          = sparc_tr_disas_log,
};
/*
 * Target entry point for TB translation: run the generic translator
 * loop over this TB with the SPARC hook table and a zeroed context.
 */
void gen_intermediate_code(CPUState *cs, TranslationBlock *tb, int *max_insns,
                           target_ulong pc, void *host_pc)
{
    DisasContext dc = {};

    translator_loop(cs, tb, max_insns, pc, host_pc, &sparc_tr_ops, &dc.base);
}
/*
 * One-time TCG initialization: create the global TCG temporaries that
 * mirror CPUSPARCState fields (condition codes, control registers,
 * windowed integer registers and FP register pairs).
 *
 * NOTE(review): reassembled from a mangled extraction; elided lines
 * (loop-variable declaration, "regwptr"/gregnames/fregnames name
 * arguments, cpu_regs[0] handling, closing braces) restored from
 * context — verify against upstream.
 */
void sparc_tcg_init(void)
{
    static const char gregnames[32][4] = {
        "g0", "g1", "g2", "g3", "g4", "g5", "g6", "g7",
        "o0", "o1", "o2", "o3", "o4", "o5", "o6", "o7",
        "l0", "l1", "l2", "l3", "l4", "l5", "l6", "l7",
        "i0", "i1", "i2", "i3", "i4", "i5", "i6", "i7",
    };
    /* Names for the 64-bit temporaries, each covering a pair of
       32-bit FP registers (hence the even numbering). */
    static const char fregnames[32][4] = {
        "f0", "f2", "f4", "f6", "f8", "f10", "f12", "f14",
        "f16", "f18", "f20", "f22", "f24", "f26", "f28", "f30",
        "f32", "f34", "f36", "f38", "f40", "f42", "f44", "f46",
        "f48", "f50", "f52", "f54", "f56", "f58", "f60", "f62",
    };

    /* Table-driven creation of the 32-bit global temporaries. */
    static const struct { TCGv_i32 *ptr; int off; const char *name; } r32[] = {
#ifdef TARGET_SPARC64
        { &cpu_xcc, offsetof(CPUSPARCState, xcc), "xcc" },
        { &cpu_fprs, offsetof(CPUSPARCState, fprs), "fprs" },
#endif
        { &cpu_cc_op, offsetof(CPUSPARCState, cc_op), "cc_op" },
        { &cpu_psr, offsetof(CPUSPARCState, psr), "psr" },
    };

    /* Table-driven creation of the target_ulong global temporaries. */
    static const struct { TCGv *ptr; int off; const char *name; } rtl[] = {
#ifdef TARGET_SPARC64
        { &cpu_gsr, offsetof(CPUSPARCState, gsr), "gsr" },
#endif
        { &cpu_cond, offsetof(CPUSPARCState, cond), "cond" },
        { &cpu_cc_src, offsetof(CPUSPARCState, cc_src), "cc_src" },
        { &cpu_cc_src2, offsetof(CPUSPARCState, cc_src2), "cc_src2" },
        { &cpu_cc_dst, offsetof(CPUSPARCState, cc_dst), "cc_dst" },
        { &cpu_fsr, offsetof(CPUSPARCState, fsr), "fsr" },
        { &cpu_pc, offsetof(CPUSPARCState, pc), "pc" },
        { &cpu_npc, offsetof(CPUSPARCState, npc), "npc" },
        { &cpu_y, offsetof(CPUSPARCState, y), "y" },
        { &cpu_tbr, offsetof(CPUSPARCState, tbr), "tbr" },
    };

    unsigned int i;

    cpu_regwptr = tcg_global_mem_new_ptr(tcg_env,
                                         offsetof(CPUSPARCState, regwptr),
                                         "regwptr");

    for (i = 0; i < ARRAY_SIZE(r32); ++i) {
        *r32[i].ptr = tcg_global_mem_new_i32(tcg_env, r32[i].off, r32[i].name);
    }

    for (i = 0; i < ARRAY_SIZE(rtl); ++i) {
        *rtl[i].ptr = tcg_global_mem_new(tcg_env, rtl[i].off, rtl[i].name);
    }

    /* %g0 is hard-wired to zero and never backed by storage. */
    cpu_regs[0] = NULL;
    /* %g1-%g7 live directly in CPUSPARCState. */
    for (i = 1; i < 8; ++i) {
        cpu_regs[i] = tcg_global_mem_new(tcg_env,
                                         offsetof(CPUSPARCState, gregs[i]),
                                         gregnames[i]);
    }

    /* %o/%l/%i registers are reached through the window pointer. */
    for (i = 8; i < 32; ++i) {
        cpu_regs[i] = tcg_global_mem_new(cpu_regwptr,
                                         (i - 8) * sizeof(target_ulong),
                                         gregnames[i]);
    }

    for (i = 0; i < TARGET_DPREGS; i++) {
        cpu_fpr[i] = tcg_global_mem_new_i64(tcg_env,
                                            offsetof(CPUSPARCState, fpr[i]),
                                            fregnames[i]);
    }
}
6007 void sparc_restore_state_to_opc(CPUState
*cs
,
6008 const TranslationBlock
*tb
,
6009 const uint64_t *data
)
6011 SPARCCPU
*cpu
= SPARC_CPU(cs
);
6012 CPUSPARCState
*env
= &cpu
->env
;
6013 target_ulong pc
= data
[0];
6014 target_ulong npc
= data
[1];
6017 if (npc
== DYNAMIC_PC
) {
6018 /* dynamic NPC: already stored */
6019 } else if (npc
& JUMP_PC
) {
6020 /* jump PC: use 'cond' and the jump targets of the translation */
6022 env
->npc
= npc
& ~3;