Copyright (C) 2003 Thomas M. Ogrisegg <tom@fnord.at>
Copyright (C) 2003-2005 Fabrice Bellard

This library is free software; you can redistribute it and/or
modify it under the terms of the GNU Lesser General Public
License as published by the Free Software Foundation; either
version 2.1 of the License, or (at your option) any later version.

This library is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
Lesser General Public License for more details.

You should have received a copy of the GNU Lesser General Public
License along with this library; if not, see <http://www.gnu.org/licenses/>.
#include "qemu/osdep.h"
#include "disas/disas.h"
#include "exec/helper-proto.h"
#include "exec/exec-all.h"
#include "tcg/tcg-op.h"
#include "exec/helper-gen.h"
#include "exec/translator.h"

#define HELPER_H "helper.h"
#include "exec/helper-info.c.inc"
# define gen_helper_rdpsr(D, E)                 qemu_build_not_reached()
# define gen_helper_rett(E)                     qemu_build_not_reached()
# define gen_helper_power_down(E)               qemu_build_not_reached()
# define gen_helper_wrpsr(E, S)                 qemu_build_not_reached()
# define gen_helper_clear_softint(E, S)         qemu_build_not_reached()
# define gen_helper_done(E)                     qemu_build_not_reached()
# define gen_helper_flushw(E)                   qemu_build_not_reached()
# define gen_helper_rdccr(D, E)                 qemu_build_not_reached()
# define gen_helper_rdcwp(D, E)                 qemu_build_not_reached()
# define gen_helper_restored(E)                 qemu_build_not_reached()
# define gen_helper_retry(E)                    qemu_build_not_reached()
# define gen_helper_saved(E)                    qemu_build_not_reached()
# define gen_helper_sdivx(D, E, A, B)           qemu_build_not_reached()
# define gen_helper_set_softint(E, S)           qemu_build_not_reached()
# define gen_helper_tick_get_count(D, E, T, C)  qemu_build_not_reached()
# define gen_helper_tick_set_count(P, S)        qemu_build_not_reached()
# define gen_helper_tick_set_limit(P, S)        qemu_build_not_reached()
# define gen_helper_udivx(D, E, A, B)           qemu_build_not_reached()
# define gen_helper_wrccr(E, S)                 qemu_build_not_reached()
# define gen_helper_wrcwp(E, S)                 qemu_build_not_reached()
# define gen_helper_wrgl(E, S)                  qemu_build_not_reached()
# define gen_helper_write_softint(E, S)         qemu_build_not_reached()
# define gen_helper_wrpil(E, S)                 qemu_build_not_reached()
# define gen_helper_wrpstate(E, S)              qemu_build_not_reached()
/* Dynamic PC, must exit to main loop. */
/* Dynamic PC, one of two values according to jump_pc[T2]. */
/* Dynamic PC, may lookup next TB. */
#define DYNAMIC_PC_LOOKUP 3

#define DISAS_EXIT  DISAS_TARGET_0
/* global register indexes */
static TCGv_ptr cpu_regwptr;
static TCGv cpu_cc_src, cpu_cc_src2, cpu_cc_dst;
static TCGv_i32 cpu_cc_op;
static TCGv_i32 cpu_psr;
static TCGv cpu_fsr, cpu_pc, cpu_npc;
static TCGv cpu_regs[32];
static TCGv_i32 cpu_xcc, cpu_fprs;
# define cpu_fprs ({ qemu_build_not_reached(); (TCGv)NULL; })
# define cpu_gsr ({ qemu_build_not_reached(); (TCGv)NULL; })
/* Floating point registers */
static TCGv_i64 cpu_fpr[TARGET_DPREGS];
#define env_field_offsetof(X)     offsetof(CPUSPARCState, X)
# define env32_field_offsetof(X)  ({ qemu_build_not_reached(); 0; })
# define env64_field_offsetof(X)  env_field_offsetof(X)
# define env32_field_offsetof(X)  env_field_offsetof(X)
# define env64_field_offsetof(X)  ({ qemu_build_not_reached(); 0; })
typedef struct DisasDelayException {
    struct DisasDelayException *next;
    /* Saved state at parent insn. */
} DisasDelayException;
typedef struct DisasContext {
    DisasContextBase base;
    target_ulong pc;    /* current Program Counter: integer or DYNAMIC_PC */
    target_ulong npc;   /* next PC: integer or DYNAMIC_PC or JUMP_PC */
    target_ulong jump_pc[2]; /* used when JUMP_PC pc value is used */
    bool address_mask_32bit;
#ifndef CONFIG_USER_ONLY
#ifdef TARGET_SPARC64
    uint32_t cc_op;  /* current CC operation */
#ifdef TARGET_SPARC64
    DisasDelayException *delay_excp_list;
// This function uses non-native bit order
#define GET_FIELD(X, FROM, TO)                                  \
    ((X) >> (31 - (TO)) & ((1 << ((TO) - (FROM) + 1)) - 1))

// This function uses the order in the manuals, i.e. bit 0 is 2^0
#define GET_FIELD_SP(X, FROM, TO)               \
    GET_FIELD(X, 31 - (TO), 31 - (FROM))

#define GET_FIELDs(x,a,b) sign_extend (GET_FIELD(x,a,b), (b) - (a) + 1)
#define GET_FIELD_SPs(x,a,b) sign_extend (GET_FIELD_SP(x,a,b), ((b) - (a) + 1))

#ifdef TARGET_SPARC64
#define DFPREG(r) (((r & 1) << 5) | (r & 0x1e))
#define QFPREG(r) (((r & 1) << 5) | (r & 0x1c))
#define DFPREG(r) (r & 0x1e)
#define QFPREG(r) (r & 0x1c)

#define UA2005_HTRAP_MASK 0xff
#define V8_TRAP_MASK 0x7f
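/*
 * Editor's note (illustrative, not from the original source): GET_FIELD
 * numbers bits big-endian style, counting down from bit 31.  For example,
 * GET_FIELD(insn, 19, 26) shifts right by 31 - 26 = 5 and masks off
 * 26 - 19 + 1 = 8 bits, i.e. it returns insn bits 12..5 in conventional
 * little-endian numbering -- which is how get_asi() below extracts the
 * 8-bit ASI field of a memory instruction.
 */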
static int sign_extend(int x, int len)
    return (x << len) >> len;

#define IS_IMM (insn & (1<<13))
static void gen_update_fprs_dirty(DisasContext *dc, int rd)
#if defined(TARGET_SPARC64)
    int bit = (rd < 32) ? 1 : 2;

    /* If we know we've already set this bit within the TB,
       we can avoid setting it again.  */
    if (!(dc->fprs_dirty & bit)) {
        dc->fprs_dirty |= bit;
        tcg_gen_ori_i32(cpu_fprs, cpu_fprs, bit);
/* floating point registers moves */
static TCGv_i32 gen_load_fpr_F(DisasContext *dc, unsigned int src)
    TCGv_i32 ret = tcg_temp_new_i32();
    tcg_gen_extrl_i64_i32(ret, cpu_fpr[src / 2]);
    tcg_gen_extrh_i64_i32(ret, cpu_fpr[src / 2]);
static void gen_store_fpr_F(DisasContext *dc, unsigned int dst, TCGv_i32 v)
    TCGv_i64 t = tcg_temp_new_i64();

    tcg_gen_extu_i32_i64(t, v);
    tcg_gen_deposit_i64(cpu_fpr[dst / 2], cpu_fpr[dst / 2], t,
                        (dst & 1 ? 0 : 32), 32);
    gen_update_fprs_dirty(dc, dst);
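/*
 * Editor's note: each 64-bit cpu_fpr[i] element holds a pair of
 * single-precision registers, %f(2i) in the high 32 bits and %f(2i+1)
 * in the low 32 bits.  That is why gen_store_fpr_F() deposits at bit 32
 * for an even register number and at bit 0 for an odd one, and why the
 * load above picks the high or low half of cpu_fpr[src / 2].
 */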
static TCGv_i32 gen_dest_fpr_F(DisasContext *dc)
    return tcg_temp_new_i32();

static TCGv_i64 gen_load_fpr_D(DisasContext *dc, unsigned int src)
    return cpu_fpr[src / 2];

static void gen_store_fpr_D(DisasContext *dc, unsigned int dst, TCGv_i64 v)
    tcg_gen_mov_i64(cpu_fpr[dst / 2], v);
    gen_update_fprs_dirty(dc, dst);

static TCGv_i64 gen_dest_fpr_D(DisasContext *dc, unsigned int dst)
    return cpu_fpr[DFPREG(dst) / 2];
static void gen_op_load_fpr_QT0(unsigned int src)
    tcg_gen_st_i64(cpu_fpr[src / 2], tcg_env, offsetof(CPUSPARCState, qt0) +
                   offsetof(CPU_QuadU, ll.upper));
    tcg_gen_st_i64(cpu_fpr[src/2 + 1], tcg_env, offsetof(CPUSPARCState, qt0) +
                   offsetof(CPU_QuadU, ll.lower));

static void gen_op_load_fpr_QT1(unsigned int src)
    tcg_gen_st_i64(cpu_fpr[src / 2], tcg_env, offsetof(CPUSPARCState, qt1) +
                   offsetof(CPU_QuadU, ll.upper));
    tcg_gen_st_i64(cpu_fpr[src/2 + 1], tcg_env, offsetof(CPUSPARCState, qt1) +
                   offsetof(CPU_QuadU, ll.lower));

static void gen_op_store_QT0_fpr(unsigned int dst)
    tcg_gen_ld_i64(cpu_fpr[dst / 2], tcg_env, offsetof(CPUSPARCState, qt0) +
                   offsetof(CPU_QuadU, ll.upper));
    tcg_gen_ld_i64(cpu_fpr[dst/2 + 1], tcg_env, offsetof(CPUSPARCState, qt0) +
                   offsetof(CPU_QuadU, ll.lower));
static void gen_store_fpr_Q(DisasContext *dc, unsigned int dst,
                            TCGv_i64 v1, TCGv_i64 v2)
    tcg_gen_mov_i64(cpu_fpr[dst / 2], v1);
    tcg_gen_mov_i64(cpu_fpr[dst / 2 + 1], v2);
    gen_update_fprs_dirty(dc, dst);

#ifdef TARGET_SPARC64
static TCGv_i64 gen_load_fpr_Q0(DisasContext *dc, unsigned int src)
    return cpu_fpr[src / 2];

static TCGv_i64 gen_load_fpr_Q1(DisasContext *dc, unsigned int src)
    return cpu_fpr[src / 2 + 1];

static void gen_move_Q(DisasContext *dc, unsigned int rd, unsigned int rs)
    tcg_gen_mov_i64(cpu_fpr[rd / 2], cpu_fpr[rs / 2]);
    tcg_gen_mov_i64(cpu_fpr[rd / 2 + 1], cpu_fpr[rs / 2 + 1]);
    gen_update_fprs_dirty(dc, rd);
#ifdef CONFIG_USER_ONLY
#define supervisor(dc) 0
#define hypervisor(dc) 0
#ifdef TARGET_SPARC64
#define hypervisor(dc) (dc->hypervisor)
#define supervisor(dc) (dc->supervisor | dc->hypervisor)
#define supervisor(dc) (dc->supervisor)
#define hypervisor(dc) 0

#if !defined(TARGET_SPARC64)
# define AM_CHECK(dc)  false
#elif defined(TARGET_ABI32)
# define AM_CHECK(dc)  true
#elif defined(CONFIG_USER_ONLY)
# define AM_CHECK(dc)  false
# define AM_CHECK(dc)  ((dc)->address_mask_32bit)
static void gen_address_mask(DisasContext *dc, TCGv addr)
    tcg_gen_andi_tl(addr, addr, 0xffffffffULL);

static target_ulong address_mask_i(DisasContext *dc, target_ulong addr)
    return AM_CHECK(dc) ? (uint32_t)addr : addr;
static TCGv gen_load_gpr(DisasContext *dc, int reg)
        return cpu_regs[reg];
    TCGv t = tcg_temp_new();
    tcg_gen_movi_tl(t, 0);

static void gen_store_gpr(DisasContext *dc, int reg, TCGv v)
        tcg_gen_mov_tl(cpu_regs[reg], v);

static TCGv gen_dest_gpr(DisasContext *dc, int reg)
        return cpu_regs[reg];
    return tcg_temp_new();
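/*
 * Editor's note: %g0 is hardwired to zero on SPARC, so gen_load_gpr()
 * hands back a fresh temporary holding 0 rather than cpu_regs[0], and
 * gen_store_gpr()/gen_dest_gpr() quietly discard writes to register 0.
 */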
static bool use_goto_tb(DisasContext *s, target_ulong pc, target_ulong npc)
    return translator_use_goto_tb(&s->base, pc) &&
           translator_use_goto_tb(&s->base, npc);

static void gen_goto_tb(DisasContext *s, int tb_num,
                        target_ulong pc, target_ulong npc)
    if (use_goto_tb(s, pc, npc)) {
        /* jump to same page: we can use a direct jump */
        tcg_gen_goto_tb(tb_num);
        tcg_gen_movi_tl(cpu_pc, pc);
        tcg_gen_movi_tl(cpu_npc, npc);
        tcg_gen_exit_tb(s->base.tb, tb_num);
        /* jump to another page: we can use an indirect jump */
        tcg_gen_movi_tl(cpu_pc, pc);
        tcg_gen_movi_tl(cpu_npc, npc);
        tcg_gen_lookup_and_goto_ptr();
static void gen_mov_reg_N(TCGv reg, TCGv_i32 src)
    tcg_gen_extu_i32_tl(reg, src);
    tcg_gen_extract_tl(reg, reg, PSR_NEG_SHIFT, 1);

static void gen_mov_reg_Z(TCGv reg, TCGv_i32 src)
    tcg_gen_extu_i32_tl(reg, src);
    tcg_gen_extract_tl(reg, reg, PSR_ZERO_SHIFT, 1);

static void gen_mov_reg_V(TCGv reg, TCGv_i32 src)
    tcg_gen_extu_i32_tl(reg, src);
    tcg_gen_extract_tl(reg, reg, PSR_OVF_SHIFT, 1);

static void gen_mov_reg_C(TCGv reg, TCGv_i32 src)
    tcg_gen_extu_i32_tl(reg, src);
    tcg_gen_extract_tl(reg, reg, PSR_CARRY_SHIFT, 1);
static void gen_op_add_cc(TCGv dst, TCGv src1, TCGv src2)
    tcg_gen_mov_tl(cpu_cc_src, src1);
    tcg_gen_mov_tl(cpu_cc_src2, src2);
    tcg_gen_add_tl(cpu_cc_dst, cpu_cc_src, cpu_cc_src2);
    tcg_gen_mov_tl(dst, cpu_cc_dst);
static TCGv_i32 gen_add32_carry32(void)
    TCGv_i32 carry_32, cc_src1_32, cc_src2_32;

    /* Carry is computed from a previous add: (dst < src)  */
#if TARGET_LONG_BITS == 64
    cc_src1_32 = tcg_temp_new_i32();
    cc_src2_32 = tcg_temp_new_i32();
    tcg_gen_extrl_i64_i32(cc_src1_32, cpu_cc_dst);
    tcg_gen_extrl_i64_i32(cc_src2_32, cpu_cc_src);
    cc_src1_32 = cpu_cc_dst;
    cc_src2_32 = cpu_cc_src;

    carry_32 = tcg_temp_new_i32();
    tcg_gen_setcond_i32(TCG_COND_LTU, carry_32, cc_src1_32, cc_src2_32);
static TCGv_i32 gen_sub32_carry32(void)
    TCGv_i32 carry_32, cc_src1_32, cc_src2_32;

    /* Carry is computed from a previous borrow: (src1 < src2)  */
#if TARGET_LONG_BITS == 64
    cc_src1_32 = tcg_temp_new_i32();
    cc_src2_32 = tcg_temp_new_i32();
    tcg_gen_extrl_i64_i32(cc_src1_32, cpu_cc_src);
    tcg_gen_extrl_i64_i32(cc_src2_32, cpu_cc_src2);
    cc_src1_32 = cpu_cc_src;
    cc_src2_32 = cpu_cc_src2;

    carry_32 = tcg_temp_new_i32();
    tcg_gen_setcond_i32(TCG_COND_LTU, carry_32, cc_src1_32, cc_src2_32);
static void gen_op_addc_int(TCGv dst, TCGv src1, TCGv src2,
                            TCGv_i32 carry_32, bool update_cc)
    tcg_gen_add_tl(dst, src1, src2);
#ifdef TARGET_SPARC64
    TCGv carry = tcg_temp_new();
    tcg_gen_extu_i32_tl(carry, carry_32);
    tcg_gen_add_tl(dst, dst, carry);
    tcg_gen_add_i32(dst, dst, carry_32);

    tcg_debug_assert(dst == cpu_cc_dst);
    tcg_gen_mov_tl(cpu_cc_src, src1);
    tcg_gen_mov_tl(cpu_cc_src2, src2);
static void gen_op_addc_int_add(TCGv dst, TCGv src1, TCGv src2, bool update_cc)
    if (TARGET_LONG_BITS == 64) {
        gen_op_addc_int(dst, src1, src2, gen_add32_carry32(), update_cc);
    /*
     * We can re-use the host's hardware carry generation by using
     * an ADD2 opcode.  We discard the low part of the output.
     * Ideally we'd combine this operation with the add that
     * generated the carry in the first place.
     */
    discard = tcg_temp_new();
    tcg_gen_add2_tl(discard, dst, cpu_cc_src, src1, cpu_cc_src2, src2);

    tcg_debug_assert(dst == cpu_cc_dst);
    tcg_gen_mov_tl(cpu_cc_src, src1);
    tcg_gen_mov_tl(cpu_cc_src2, src2);
static void gen_op_addc_add(TCGv dst, TCGv src1, TCGv src2)
    gen_op_addc_int_add(dst, src1, src2, false);

static void gen_op_addccc_add(TCGv dst, TCGv src1, TCGv src2)
    gen_op_addc_int_add(dst, src1, src2, true);

static void gen_op_addc_sub(TCGv dst, TCGv src1, TCGv src2)
    gen_op_addc_int(dst, src1, src2, gen_sub32_carry32(), false);

static void gen_op_addccc_sub(TCGv dst, TCGv src1, TCGv src2)
    gen_op_addc_int(dst, src1, src2, gen_sub32_carry32(), true);

static void gen_op_addc_int_generic(TCGv dst, TCGv src1, TCGv src2,
    TCGv_i32 carry_32 = tcg_temp_new_i32();
    gen_helper_compute_C_icc(carry_32, tcg_env);
    gen_op_addc_int(dst, src1, src2, carry_32, update_cc);

static void gen_op_addc_generic(TCGv dst, TCGv src1, TCGv src2)
    gen_op_addc_int_generic(dst, src1, src2, false);

static void gen_op_addccc_generic(TCGv dst, TCGv src1, TCGv src2)
    gen_op_addc_int_generic(dst, src1, src2, true);
static void gen_op_sub_cc(TCGv dst, TCGv src1, TCGv src2)
    tcg_gen_mov_tl(cpu_cc_src, src1);
    tcg_gen_mov_tl(cpu_cc_src2, src2);
    tcg_gen_sub_tl(cpu_cc_dst, cpu_cc_src, cpu_cc_src2);
    tcg_gen_mov_tl(dst, cpu_cc_dst);
static void gen_op_subc_int(TCGv dst, TCGv src1, TCGv src2,
                            TCGv_i32 carry_32, bool update_cc)
#if TARGET_LONG_BITS == 64
    carry = tcg_temp_new();
    tcg_gen_extu_i32_i64(carry, carry_32);

    tcg_gen_sub_tl(dst, src1, src2);
    tcg_gen_sub_tl(dst, dst, carry);

    tcg_debug_assert(dst == cpu_cc_dst);
    tcg_gen_mov_tl(cpu_cc_src, src1);
    tcg_gen_mov_tl(cpu_cc_src2, src2);
static void gen_op_subc_add(TCGv dst, TCGv src1, TCGv src2)
    gen_op_subc_int(dst, src1, src2, gen_add32_carry32(), false);

static void gen_op_subccc_add(TCGv dst, TCGv src1, TCGv src2)
    gen_op_subc_int(dst, src1, src2, gen_add32_carry32(), true);

static void gen_op_subc_int_sub(TCGv dst, TCGv src1, TCGv src2, bool update_cc)
    if (TARGET_LONG_BITS == 64) {
        gen_op_subc_int(dst, src1, src2, gen_sub32_carry32(), update_cc);
    /*
     * We can re-use the host's hardware carry generation by using
     * a SUB2 opcode.  We discard the low part of the output.
     */
    discard = tcg_temp_new();
    tcg_gen_sub2_tl(discard, dst, cpu_cc_src, src1, cpu_cc_src2, src2);

    tcg_debug_assert(dst == cpu_cc_dst);
    tcg_gen_mov_tl(cpu_cc_src, src1);
    tcg_gen_mov_tl(cpu_cc_src2, src2);

static void gen_op_subc_sub(TCGv dst, TCGv src1, TCGv src2)
    gen_op_subc_int_sub(dst, src1, src2, false);

static void gen_op_subccc_sub(TCGv dst, TCGv src1, TCGv src2)
    gen_op_subc_int_sub(dst, src1, src2, true);

static void gen_op_subc_int_generic(TCGv dst, TCGv src1, TCGv src2,
    TCGv_i32 carry_32 = tcg_temp_new_i32();

    gen_helper_compute_C_icc(carry_32, tcg_env);
    gen_op_subc_int(dst, src1, src2, carry_32, update_cc);

static void gen_op_subc_generic(TCGv dst, TCGv src1, TCGv src2)
    gen_op_subc_int_generic(dst, src1, src2, false);

static void gen_op_subccc_generic(TCGv dst, TCGv src1, TCGv src2)
    gen_op_subc_int_generic(dst, src1, src2, true);
static void gen_op_mulscc(TCGv dst, TCGv src1, TCGv src2)
    TCGv r_temp, zero, t0;

    r_temp = tcg_temp_new();

    zero = tcg_constant_tl(0);
    tcg_gen_andi_tl(cpu_cc_src, src1, 0xffffffff);
    tcg_gen_andi_tl(r_temp, cpu_y, 0x1);
    tcg_gen_andi_tl(cpu_cc_src2, src2, 0xffffffff);
    tcg_gen_movcond_tl(TCG_COND_EQ, cpu_cc_src2, r_temp, zero,

    // env->y = (b2 << 31) | (env->y >> 1);
    tcg_gen_extract_tl(t0, cpu_y, 1, 31);
    tcg_gen_deposit_tl(cpu_y, t0, cpu_cc_src, 31, 1);

    gen_mov_reg_N(t0, cpu_psr);
    gen_mov_reg_V(r_temp, cpu_psr);
    tcg_gen_xor_tl(t0, t0, r_temp);

    // T0 = (b1 << 31) | (T0 >> 1);
    tcg_gen_shli_tl(t0, t0, 31);
    tcg_gen_shri_tl(cpu_cc_src, cpu_cc_src, 1);
    tcg_gen_or_tl(cpu_cc_src, cpu_cc_src, t0);

    tcg_gen_add_tl(cpu_cc_dst, cpu_cc_src, cpu_cc_src2);

    tcg_gen_mov_tl(dst, cpu_cc_dst);
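/*
 * Editor's note: MULScc implements one step of the iterative 32-step
 * multiply.  The low bit of %y selects whether the second operand is
 * added (cpu_cc_src2 is forced to zero when that bit is clear), %y
 * shifts right with the low bit of the first operand inserted at
 * bit 31, and N xor V from the previous step is shifted into bit 31 of
 * the first operand before the final add sets the condition codes.
 */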
static void gen_op_multiply(TCGv dst, TCGv src1, TCGv src2, int sign_ext)
#if TARGET_LONG_BITS == 32
    tcg_gen_muls2_tl(dst, cpu_y, src1, src2);
    tcg_gen_mulu2_tl(dst, cpu_y, src1, src2);
    TCGv t0 = tcg_temp_new_i64();
    TCGv t1 = tcg_temp_new_i64();

    tcg_gen_ext32s_i64(t0, src1);
    tcg_gen_ext32s_i64(t1, src2);
    tcg_gen_ext32u_i64(t0, src1);
    tcg_gen_ext32u_i64(t1, src2);

    tcg_gen_mul_i64(dst, t0, t1);
    tcg_gen_shri_i64(cpu_y, dst, 32);
static void gen_op_umul(TCGv dst, TCGv src1, TCGv src2)
    /* zero-extend truncated operands before multiplication */
    gen_op_multiply(dst, src1, src2, 0);

static void gen_op_smul(TCGv dst, TCGv src1, TCGv src2)
    /* sign-extend truncated operands before multiplication */
    gen_op_multiply(dst, src1, src2, 1);

static void gen_op_udivx(TCGv dst, TCGv src1, TCGv src2)
    gen_helper_udivx(dst, tcg_env, src1, src2);

static void gen_op_sdivx(TCGv dst, TCGv src1, TCGv src2)
    gen_helper_sdivx(dst, tcg_env, src1, src2);

static void gen_op_udiv(TCGv dst, TCGv src1, TCGv src2)
    gen_helper_udiv(dst, tcg_env, src1, src2);

static void gen_op_sdiv(TCGv dst, TCGv src1, TCGv src2)
    gen_helper_sdiv(dst, tcg_env, src1, src2);

static void gen_op_udivcc(TCGv dst, TCGv src1, TCGv src2)
    gen_helper_udiv_cc(dst, tcg_env, src1, src2);

static void gen_op_sdivcc(TCGv dst, TCGv src1, TCGv src2)
    gen_helper_sdiv_cc(dst, tcg_env, src1, src2);

static void gen_op_taddcctv(TCGv dst, TCGv src1, TCGv src2)
    gen_helper_taddcctv(dst, tcg_env, src1, src2);

static void gen_op_tsubcctv(TCGv dst, TCGv src1, TCGv src2)
    gen_helper_tsubcctv(dst, tcg_env, src1, src2);

static void gen_op_popc(TCGv dst, TCGv src1, TCGv src2)
    tcg_gen_ctpop_tl(dst, src2);
static void gen_op_eval_ba(TCGv dst)
    tcg_gen_movi_tl(dst, 1);

static void gen_op_eval_be(TCGv dst, TCGv_i32 src)
    gen_mov_reg_Z(dst, src);

static void gen_op_eval_ble(TCGv dst, TCGv_i32 src)
    TCGv t0 = tcg_temp_new();
    gen_mov_reg_N(t0, src);
    gen_mov_reg_V(dst, src);
    tcg_gen_xor_tl(dst, dst, t0);
    gen_mov_reg_Z(t0, src);
    tcg_gen_or_tl(dst, dst, t0);

static void gen_op_eval_bl(TCGv dst, TCGv_i32 src)
    TCGv t0 = tcg_temp_new();
    gen_mov_reg_V(t0, src);
    gen_mov_reg_N(dst, src);
    tcg_gen_xor_tl(dst, dst, t0);

static void gen_op_eval_bleu(TCGv dst, TCGv_i32 src)
    TCGv t0 = tcg_temp_new();
    gen_mov_reg_Z(t0, src);
    gen_mov_reg_C(dst, src);
    tcg_gen_or_tl(dst, dst, t0);

static void gen_op_eval_bcs(TCGv dst, TCGv_i32 src)
    gen_mov_reg_C(dst, src);

static void gen_op_eval_bvs(TCGv dst, TCGv_i32 src)
    gen_mov_reg_V(dst, src);

static void gen_op_eval_bn(TCGv dst)
    tcg_gen_movi_tl(dst, 0);

static void gen_op_eval_bneg(TCGv dst, TCGv_i32 src)
    gen_mov_reg_N(dst, src);

static void gen_op_eval_bne(TCGv dst, TCGv_i32 src)
    gen_mov_reg_Z(dst, src);
    tcg_gen_xori_tl(dst, dst, 0x1);

static void gen_op_eval_bg(TCGv dst, TCGv_i32 src)
    gen_op_eval_ble(dst, src);
    tcg_gen_xori_tl(dst, dst, 0x1);

static void gen_op_eval_bge(TCGv dst, TCGv_i32 src)
    gen_op_eval_bl(dst, src);
    tcg_gen_xori_tl(dst, dst, 0x1);

static void gen_op_eval_bgu(TCGv dst, TCGv_i32 src)
    gen_op_eval_bleu(dst, src);
    tcg_gen_xori_tl(dst, dst, 0x1);

static void gen_op_eval_bcc(TCGv dst, TCGv_i32 src)
    gen_mov_reg_C(dst, src);
    tcg_gen_xori_tl(dst, dst, 0x1);

static void gen_op_eval_bpos(TCGv dst, TCGv_i32 src)
    gen_mov_reg_N(dst, src);
    tcg_gen_xori_tl(dst, dst, 0x1);

static void gen_op_eval_bvc(TCGv dst, TCGv_i32 src)
    gen_mov_reg_V(dst, src);
    tcg_gen_xori_tl(dst, dst, 0x1);
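/*
 * Editor's note: each gen_op_eval_* above leaves a 0/1 value in dst,
 * derived from the N/Z/V/C bits of the 32-bit PSR image in src.  The
 * "negated" conditions (bne, bg, bge, bgu, bcc, bpos, bvc) simply
 * compute the complementary condition and xor the result with 1.
 */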
FPSR bit field FCC1 | FCC0:

static void gen_mov_reg_FCC0(TCGv reg, TCGv src,
                             unsigned int fcc_offset)
    tcg_gen_shri_tl(reg, src, FSR_FCC0_SHIFT + fcc_offset);
    tcg_gen_andi_tl(reg, reg, 0x1);

static void gen_mov_reg_FCC1(TCGv reg, TCGv src, unsigned int fcc_offset)
    tcg_gen_shri_tl(reg, src, FSR_FCC1_SHIFT + fcc_offset);
    tcg_gen_andi_tl(reg, reg, 0x1);
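/*
 * Editor's note: the two-bit fcc fields in the FSR encode the result of
 * the last FP compare as 0 = equal, 1 = less, 2 = greater, 3 = unordered;
 * fcc_offset selects which of the fcc fields is examined.
 */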
static void gen_op_eval_fbne(TCGv dst, TCGv src, unsigned int fcc_offset)
    TCGv t0 = tcg_temp_new();
    gen_mov_reg_FCC0(dst, src, fcc_offset);
    gen_mov_reg_FCC1(t0, src, fcc_offset);
    tcg_gen_or_tl(dst, dst, t0);

// 1 or 2: FCC0 ^ FCC1
static void gen_op_eval_fblg(TCGv dst, TCGv src, unsigned int fcc_offset)
    TCGv t0 = tcg_temp_new();
    gen_mov_reg_FCC0(dst, src, fcc_offset);
    gen_mov_reg_FCC1(t0, src, fcc_offset);
    tcg_gen_xor_tl(dst, dst, t0);

static void gen_op_eval_fbul(TCGv dst, TCGv src, unsigned int fcc_offset)
    gen_mov_reg_FCC0(dst, src, fcc_offset);

static void gen_op_eval_fbl(TCGv dst, TCGv src, unsigned int fcc_offset)
    TCGv t0 = tcg_temp_new();
    gen_mov_reg_FCC0(dst, src, fcc_offset);
    gen_mov_reg_FCC1(t0, src, fcc_offset);
    tcg_gen_andc_tl(dst, dst, t0);

static void gen_op_eval_fbug(TCGv dst, TCGv src, unsigned int fcc_offset)
    gen_mov_reg_FCC1(dst, src, fcc_offset);

static void gen_op_eval_fbg(TCGv dst, TCGv src, unsigned int fcc_offset)
    TCGv t0 = tcg_temp_new();
    gen_mov_reg_FCC0(dst, src, fcc_offset);
    gen_mov_reg_FCC1(t0, src, fcc_offset);
    tcg_gen_andc_tl(dst, t0, dst);

static void gen_op_eval_fbu(TCGv dst, TCGv src, unsigned int fcc_offset)
    TCGv t0 = tcg_temp_new();
    gen_mov_reg_FCC0(dst, src, fcc_offset);
    gen_mov_reg_FCC1(t0, src, fcc_offset);
    tcg_gen_and_tl(dst, dst, t0);

static void gen_op_eval_fbe(TCGv dst, TCGv src, unsigned int fcc_offset)
    TCGv t0 = tcg_temp_new();
    gen_mov_reg_FCC0(dst, src, fcc_offset);
    gen_mov_reg_FCC1(t0, src, fcc_offset);
    tcg_gen_or_tl(dst, dst, t0);
    tcg_gen_xori_tl(dst, dst, 0x1);

// 0 or 3: !(FCC0 ^ FCC1)
static void gen_op_eval_fbue(TCGv dst, TCGv src, unsigned int fcc_offset)
    TCGv t0 = tcg_temp_new();
    gen_mov_reg_FCC0(dst, src, fcc_offset);
    gen_mov_reg_FCC1(t0, src, fcc_offset);
    tcg_gen_xor_tl(dst, dst, t0);
    tcg_gen_xori_tl(dst, dst, 0x1);

static void gen_op_eval_fbge(TCGv dst, TCGv src, unsigned int fcc_offset)
    gen_mov_reg_FCC0(dst, src, fcc_offset);
    tcg_gen_xori_tl(dst, dst, 0x1);

// !1: !(FCC0 & !FCC1)
static void gen_op_eval_fbuge(TCGv dst, TCGv src, unsigned int fcc_offset)
    TCGv t0 = tcg_temp_new();
    gen_mov_reg_FCC0(dst, src, fcc_offset);
    gen_mov_reg_FCC1(t0, src, fcc_offset);
    tcg_gen_andc_tl(dst, dst, t0);
    tcg_gen_xori_tl(dst, dst, 0x1);

static void gen_op_eval_fble(TCGv dst, TCGv src, unsigned int fcc_offset)
    gen_mov_reg_FCC1(dst, src, fcc_offset);
    tcg_gen_xori_tl(dst, dst, 0x1);

// !2: !(!FCC0 & FCC1)
static void gen_op_eval_fbule(TCGv dst, TCGv src, unsigned int fcc_offset)
    TCGv t0 = tcg_temp_new();
    gen_mov_reg_FCC0(dst, src, fcc_offset);
    gen_mov_reg_FCC1(t0, src, fcc_offset);
    tcg_gen_andc_tl(dst, t0, dst);
    tcg_gen_xori_tl(dst, dst, 0x1);

// !3: !(FCC0 & FCC1)
static void gen_op_eval_fbo(TCGv dst, TCGv src, unsigned int fcc_offset)
    TCGv t0 = tcg_temp_new();
    gen_mov_reg_FCC0(dst, src, fcc_offset);
    gen_mov_reg_FCC1(t0, src, fcc_offset);
    tcg_gen_and_tl(dst, dst, t0);
    tcg_gen_xori_tl(dst, dst, 0x1);
static void gen_branch2(DisasContext *dc, target_ulong pc1,
                        target_ulong pc2, TCGv r_cond)
    TCGLabel *l1 = gen_new_label();

    tcg_gen_brcondi_tl(TCG_COND_EQ, r_cond, 0, l1);

    gen_goto_tb(dc, 0, pc1, pc1 + 4);

    gen_goto_tb(dc, 1, pc2, pc2 + 4);

static void gen_generic_branch(DisasContext *dc)
    TCGv npc0 = tcg_constant_tl(dc->jump_pc[0]);
    TCGv npc1 = tcg_constant_tl(dc->jump_pc[1]);
    TCGv zero = tcg_constant_tl(0);

    tcg_gen_movcond_tl(TCG_COND_NE, cpu_npc, cpu_cond, zero, npc0, npc1);

/* call this function before using the condition register as it may
   have been set for a jump */
static void flush_cond(DisasContext *dc)
    if (dc->npc == JUMP_PC) {
        gen_generic_branch(dc);
        dc->npc = DYNAMIC_PC_LOOKUP;

static void save_npc(DisasContext *dc)
        gen_generic_branch(dc);
        dc->npc = DYNAMIC_PC_LOOKUP;
    case DYNAMIC_PC_LOOKUP:
        g_assert_not_reached();
        tcg_gen_movi_tl(cpu_npc, dc->npc);
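/*
 * Editor's note: while npc == JUMP_PC the next program counter is not yet
 * known statically -- it is jump_pc[0] or jump_pc[1] depending on the
 * condition held in cpu_cond.  gen_generic_branch() resolves that choice
 * into cpu_npc with a movcond, after which the translator falls back to
 * the dynamic-PC handling (DYNAMIC_PC_LOOKUP).
 */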
static void update_psr(DisasContext *dc)
    if (dc->cc_op != CC_OP_FLAGS) {
        dc->cc_op = CC_OP_FLAGS;
        gen_helper_compute_psr(tcg_env);

static void save_state(DisasContext *dc)
    tcg_gen_movi_tl(cpu_pc, dc->pc);

static void gen_exception(DisasContext *dc, int which)
    gen_helper_raise_exception(tcg_env, tcg_constant_i32(which));
    dc->base.is_jmp = DISAS_NORETURN;
static TCGLabel *delay_exceptionv(DisasContext *dc, TCGv_i32 excp)
    DisasDelayException *e = g_new0(DisasDelayException, 1);

    e->next = dc->delay_excp_list;
    dc->delay_excp_list = e;

    e->lab = gen_new_label();

    /* Caller must have used flush_cond before branch. */
    assert(e->npc != JUMP_PC);

static TCGLabel *delay_exception(DisasContext *dc, int excp)
    return delay_exceptionv(dc, tcg_constant_i32(excp));

static void gen_check_align(DisasContext *dc, TCGv addr, int mask)
    TCGv t = tcg_temp_new();

    tcg_gen_andi_tl(t, addr, mask);

    lab = delay_exception(dc, TT_UNALIGNED);
    tcg_gen_brcondi_tl(TCG_COND_NE, t, 0, lab);
static void gen_mov_pc_npc(DisasContext *dc)
        gen_generic_branch(dc);
        tcg_gen_mov_tl(cpu_pc, cpu_npc);
        dc->pc = DYNAMIC_PC_LOOKUP;
    case DYNAMIC_PC_LOOKUP:
        tcg_gen_mov_tl(cpu_pc, cpu_npc);
        g_assert_not_reached();

static void gen_op_next_insn(void)
    tcg_gen_mov_tl(cpu_pc, cpu_npc);
    tcg_gen_addi_tl(cpu_npc, cpu_npc, 4);
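/*
 * Editor's note: SPARC branches have a delay slot, so the translator
 * tracks pc and npc as a pair.  gen_op_next_insn() advances sequentially:
 * the delay-slot instruction at npc becomes the new pc, and npc moves on
 * by 4.
 */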
static void gen_compare(DisasCompare *cmp, bool xcc, unsigned int cond,
    static int subcc_cond[16] = {
        -1, /* no overflow */
    static int logic_cond[16] = {
        TCG_COND_EQ,     /* eq:  Z */
        TCG_COND_LE,     /* le:  Z | (N ^ V) -> Z | N */
        TCG_COND_LT,     /* lt:  N ^ V -> N */
        TCG_COND_EQ,     /* leu: C | Z -> Z */
        TCG_COND_NEVER,  /* ltu: C -> 0 */
        TCG_COND_LT,     /* neg: N */
        TCG_COND_NEVER,  /* vs:  V -> 0 */
        TCG_COND_NE,     /* ne:  !Z */
        TCG_COND_GT,     /* gt:  !(Z | (N ^ V)) -> !(Z | N) */
        TCG_COND_GE,     /* ge:  !(N ^ V) -> !N */
        TCG_COND_NE,     /* gtu: !(C | Z) -> !Z */
        TCG_COND_ALWAYS, /* geu: !C -> 1 */
        TCG_COND_GE,     /* pos: !N */
        TCG_COND_ALWAYS, /* vc:  !V -> 1 */
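    /*
     * Editor's note: logic_cond works because logical (CC_OP_LOGIC)
     * operations leave C and V clear, so conditions that mention them
     * collapse to tests on the result alone -- e.g. "leu" (C | Z)
     * reduces to Z, and "ltu" (C) can never be true.
     */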
#ifdef TARGET_SPARC64
    switch (dc->cc_op) {
        cmp->cond = logic_cond[cond];
        cmp->is_bool = false;
        cmp->c2 = tcg_constant_tl(0);
#ifdef TARGET_SPARC64
            cmp->c1 = tcg_temp_new();
            tcg_gen_ext32s_tl(cmp->c1, cpu_cc_dst);
            cmp->c1 = cpu_cc_dst;

            cmp->cond = (cond == 6 ? TCG_COND_LT : TCG_COND_GE);
            goto do_compare_dst_0;

        case 7: /* overflow */
        case 15: /* !overflow */

        cmp->cond = subcc_cond[cond];
        cmp->is_bool = false;
#ifdef TARGET_SPARC64
            /* Note that sign-extension works for unsigned compares as
               long as both operands are sign-extended.  */
            cmp->c1 = tcg_temp_new();
            cmp->c2 = tcg_temp_new();
            tcg_gen_ext32s_tl(cmp->c1, cpu_cc_src);
            tcg_gen_ext32s_tl(cmp->c2, cpu_cc_src2);
            cmp->c1 = cpu_cc_src;
            cmp->c2 = cpu_cc_src2;

        gen_helper_compute_psr(tcg_env);
        dc->cc_op = CC_OP_FLAGS;

        /* We're going to generate a boolean result.  */
        cmp->cond = TCG_COND_NE;
        cmp->is_bool = true;
        cmp->c1 = r_dst = tcg_temp_new();
        cmp->c2 = tcg_constant_tl(0);

            gen_op_eval_bn(r_dst);
            gen_op_eval_be(r_dst, r_src);
            gen_op_eval_ble(r_dst, r_src);
            gen_op_eval_bl(r_dst, r_src);
            gen_op_eval_bleu(r_dst, r_src);
            gen_op_eval_bcs(r_dst, r_src);
            gen_op_eval_bneg(r_dst, r_src);
            gen_op_eval_bvs(r_dst, r_src);
            gen_op_eval_ba(r_dst);
            gen_op_eval_bne(r_dst, r_src);
            gen_op_eval_bg(r_dst, r_src);
            gen_op_eval_bge(r_dst, r_src);
            gen_op_eval_bgu(r_dst, r_src);
            gen_op_eval_bcc(r_dst, r_src);
            gen_op_eval_bpos(r_dst, r_src);
            gen_op_eval_bvc(r_dst, r_src);
static void gen_fcompare(DisasCompare *cmp, unsigned int cc, unsigned int cond)
    unsigned int offset;

    /* For now we still generate a straight boolean result.  */
    cmp->cond = TCG_COND_NE;
    cmp->is_bool = true;
    cmp->c1 = r_dst = tcg_temp_new();
    cmp->c2 = tcg_constant_tl(0);

        gen_op_eval_bn(r_dst);
        gen_op_eval_fbne(r_dst, cpu_fsr, offset);
        gen_op_eval_fblg(r_dst, cpu_fsr, offset);
        gen_op_eval_fbul(r_dst, cpu_fsr, offset);
        gen_op_eval_fbl(r_dst, cpu_fsr, offset);
        gen_op_eval_fbug(r_dst, cpu_fsr, offset);
        gen_op_eval_fbg(r_dst, cpu_fsr, offset);
        gen_op_eval_fbu(r_dst, cpu_fsr, offset);
        gen_op_eval_ba(r_dst);
        gen_op_eval_fbe(r_dst, cpu_fsr, offset);
        gen_op_eval_fbue(r_dst, cpu_fsr, offset);
        gen_op_eval_fbge(r_dst, cpu_fsr, offset);
        gen_op_eval_fbuge(r_dst, cpu_fsr, offset);
        gen_op_eval_fble(r_dst, cpu_fsr, offset);
        gen_op_eval_fbule(r_dst, cpu_fsr, offset);
        gen_op_eval_fbo(r_dst, cpu_fsr, offset);
static const TCGCond gen_tcg_cond_reg[8] = {
    TCG_COND_NEVER,  /* reserved */
    TCG_COND_NEVER,  /* reserved */

static void gen_compare_reg(DisasCompare *cmp, int cond, TCGv r_src)
    cmp->cond = tcg_invert_cond(gen_tcg_cond_reg[cond]);
    cmp->is_bool = false;
    cmp->c2 = tcg_constant_tl(0);
#ifdef TARGET_SPARC64
static void gen_op_fcmps(int fccno, TCGv_i32 r_rs1, TCGv_i32 r_rs2)
        gen_helper_fcmps(cpu_fsr, tcg_env, r_rs1, r_rs2);
        gen_helper_fcmps_fcc1(cpu_fsr, tcg_env, r_rs1, r_rs2);
        gen_helper_fcmps_fcc2(cpu_fsr, tcg_env, r_rs1, r_rs2);
        gen_helper_fcmps_fcc3(cpu_fsr, tcg_env, r_rs1, r_rs2);

static void gen_op_fcmpd(int fccno, TCGv_i64 r_rs1, TCGv_i64 r_rs2)
        gen_helper_fcmpd(cpu_fsr, tcg_env, r_rs1, r_rs2);
        gen_helper_fcmpd_fcc1(cpu_fsr, tcg_env, r_rs1, r_rs2);
        gen_helper_fcmpd_fcc2(cpu_fsr, tcg_env, r_rs1, r_rs2);
        gen_helper_fcmpd_fcc3(cpu_fsr, tcg_env, r_rs1, r_rs2);

static void gen_op_fcmpq(int fccno)
        gen_helper_fcmpq(cpu_fsr, tcg_env);
        gen_helper_fcmpq_fcc1(cpu_fsr, tcg_env);
        gen_helper_fcmpq_fcc2(cpu_fsr, tcg_env);
        gen_helper_fcmpq_fcc3(cpu_fsr, tcg_env);

static void gen_op_fcmpes(int fccno, TCGv_i32 r_rs1, TCGv_i32 r_rs2)
        gen_helper_fcmpes(cpu_fsr, tcg_env, r_rs1, r_rs2);
        gen_helper_fcmpes_fcc1(cpu_fsr, tcg_env, r_rs1, r_rs2);
        gen_helper_fcmpes_fcc2(cpu_fsr, tcg_env, r_rs1, r_rs2);
        gen_helper_fcmpes_fcc3(cpu_fsr, tcg_env, r_rs1, r_rs2);

static void gen_op_fcmped(int fccno, TCGv_i64 r_rs1, TCGv_i64 r_rs2)
        gen_helper_fcmped(cpu_fsr, tcg_env, r_rs1, r_rs2);
        gen_helper_fcmped_fcc1(cpu_fsr, tcg_env, r_rs1, r_rs2);
        gen_helper_fcmped_fcc2(cpu_fsr, tcg_env, r_rs1, r_rs2);
        gen_helper_fcmped_fcc3(cpu_fsr, tcg_env, r_rs1, r_rs2);

static void gen_op_fcmpeq(int fccno)
        gen_helper_fcmpeq(cpu_fsr, tcg_env);
        gen_helper_fcmpeq_fcc1(cpu_fsr, tcg_env);
        gen_helper_fcmpeq_fcc2(cpu_fsr, tcg_env);
        gen_helper_fcmpeq_fcc3(cpu_fsr, tcg_env);

static void gen_op_fcmps(int fccno, TCGv r_rs1, TCGv r_rs2)
    gen_helper_fcmps(cpu_fsr, tcg_env, r_rs1, r_rs2);

static void gen_op_fcmpd(int fccno, TCGv_i64 r_rs1, TCGv_i64 r_rs2)
    gen_helper_fcmpd(cpu_fsr, tcg_env, r_rs1, r_rs2);

static void gen_op_fcmpq(int fccno)
    gen_helper_fcmpq(cpu_fsr, tcg_env);

static void gen_op_fcmpes(int fccno, TCGv r_rs1, TCGv r_rs2)
    gen_helper_fcmpes(cpu_fsr, tcg_env, r_rs1, r_rs2);

static void gen_op_fcmped(int fccno, TCGv_i64 r_rs1, TCGv_i64 r_rs2)
    gen_helper_fcmped(cpu_fsr, tcg_env, r_rs1, r_rs2);

static void gen_op_fcmpeq(int fccno)
    gen_helper_fcmpeq(cpu_fsr, tcg_env);
static void gen_op_fpexception_im(DisasContext *dc, int fsr_flags)
    tcg_gen_andi_tl(cpu_fsr, cpu_fsr, FSR_FTT_NMASK);
    tcg_gen_ori_tl(cpu_fsr, cpu_fsr, fsr_flags);
    gen_exception(dc, TT_FP_EXCP);

static int gen_trap_ifnofpu(DisasContext *dc)
#if !defined(CONFIG_USER_ONLY)
    if (!dc->fpu_enabled) {
        gen_exception(dc, TT_NFPU_INSN);

static void gen_op_clear_ieee_excp_and_FTT(void)
    tcg_gen_andi_tl(cpu_fsr, cpu_fsr, FSR_FTT_CEXC_NMASK);
static void gen_fop_FF(DisasContext *dc, int rd, int rs,
                       void (*gen)(TCGv_i32, TCGv_ptr, TCGv_i32))
    src = gen_load_fpr_F(dc, rs);
    dst = gen_dest_fpr_F(dc);

    gen(dst, tcg_env, src);
    gen_helper_check_ieee_exceptions(cpu_fsr, tcg_env);

    gen_store_fpr_F(dc, rd, dst);

static void gen_ne_fop_FF(DisasContext *dc, int rd, int rs,
                          void (*gen)(TCGv_i32, TCGv_i32))
    src = gen_load_fpr_F(dc, rs);
    dst = gen_dest_fpr_F(dc);

    gen_store_fpr_F(dc, rd, dst);

static void gen_fop_FFF(DisasContext *dc, int rd, int rs1, int rs2,
                        void (*gen)(TCGv_i32, TCGv_ptr, TCGv_i32, TCGv_i32))
    TCGv_i32 dst, src1, src2;

    src1 = gen_load_fpr_F(dc, rs1);
    src2 = gen_load_fpr_F(dc, rs2);
    dst = gen_dest_fpr_F(dc);

    gen(dst, tcg_env, src1, src2);
    gen_helper_check_ieee_exceptions(cpu_fsr, tcg_env);

    gen_store_fpr_F(dc, rd, dst);
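/*
 * Editor's note: the wrappers in this family follow a naming pattern --
 * gen_fop_XY loads source register(s) of type Y, calls the generator,
 * runs gen_helper_check_ieee_exceptions(), and stores a destination of
 * type X (F = 32-bit, D = 64-bit, Q = 128-bit via the QT0/QT1 staging
 * area).  The gen_ne_fop_* variants appear to be for operations that
 * cannot raise IEEE exceptions, so they skip the check.
 */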
1614 #ifdef TARGET_SPARC64
1615 static void gen_ne_fop_FFF(DisasContext
*dc
, int rd
, int rs1
, int rs2
,
1616 void (*gen
)(TCGv_i32
, TCGv_i32
, TCGv_i32
))
1618 TCGv_i32 dst
, src1
, src2
;
1620 src1
= gen_load_fpr_F(dc
, rs1
);
1621 src2
= gen_load_fpr_F(dc
, rs2
);
1622 dst
= gen_dest_fpr_F(dc
);
1624 gen(dst
, src1
, src2
);
1626 gen_store_fpr_F(dc
, rd
, dst
);
1630 static void gen_fop_DD(DisasContext
*dc
, int rd
, int rs
,
1631 void (*gen
)(TCGv_i64
, TCGv_ptr
, TCGv_i64
))
1635 src
= gen_load_fpr_D(dc
, rs
);
1636 dst
= gen_dest_fpr_D(dc
, rd
);
1638 gen(dst
, tcg_env
, src
);
1639 gen_helper_check_ieee_exceptions(cpu_fsr
, tcg_env
);
1641 gen_store_fpr_D(dc
, rd
, dst
);
1644 #ifdef TARGET_SPARC64
1645 static void gen_ne_fop_DD(DisasContext
*dc
, int rd
, int rs
,
1646 void (*gen
)(TCGv_i64
, TCGv_i64
))
1650 src
= gen_load_fpr_D(dc
, rs
);
1651 dst
= gen_dest_fpr_D(dc
, rd
);
1655 gen_store_fpr_D(dc
, rd
, dst
);
1659 static void gen_fop_DDD(DisasContext
*dc
, int rd
, int rs1
, int rs2
,
1660 void (*gen
)(TCGv_i64
, TCGv_ptr
, TCGv_i64
, TCGv_i64
))
1662 TCGv_i64 dst
, src1
, src2
;
1664 src1
= gen_load_fpr_D(dc
, rs1
);
1665 src2
= gen_load_fpr_D(dc
, rs2
);
1666 dst
= gen_dest_fpr_D(dc
, rd
);
1668 gen(dst
, tcg_env
, src1
, src2
);
1669 gen_helper_check_ieee_exceptions(cpu_fsr
, tcg_env
);
1671 gen_store_fpr_D(dc
, rd
, dst
);
1674 #ifdef TARGET_SPARC64
1675 static void gen_ne_fop_DDD(DisasContext
*dc
, int rd
, int rs1
, int rs2
,
1676 void (*gen
)(TCGv_i64
, TCGv_i64
, TCGv_i64
))
1678 TCGv_i64 dst
, src1
, src2
;
1680 src1
= gen_load_fpr_D(dc
, rs1
);
1681 src2
= gen_load_fpr_D(dc
, rs2
);
1682 dst
= gen_dest_fpr_D(dc
, rd
);
1684 gen(dst
, src1
, src2
);
1686 gen_store_fpr_D(dc
, rd
, dst
);
1689 static void gen_gsr_fop_DDD(DisasContext
*dc
, int rd
, int rs1
, int rs2
,
1690 void (*gen
)(TCGv_i64
, TCGv_i64
, TCGv_i64
, TCGv_i64
))
1692 TCGv_i64 dst
, src1
, src2
;
1694 src1
= gen_load_fpr_D(dc
, rs1
);
1695 src2
= gen_load_fpr_D(dc
, rs2
);
1696 dst
= gen_dest_fpr_D(dc
, rd
);
1698 gen(dst
, cpu_gsr
, src1
, src2
);
1700 gen_store_fpr_D(dc
, rd
, dst
);
1703 static void gen_ne_fop_DDDD(DisasContext
*dc
, int rd
, int rs1
, int rs2
,
1704 void (*gen
)(TCGv_i64
, TCGv_i64
, TCGv_i64
, TCGv_i64
))
1706 TCGv_i64 dst
, src0
, src1
, src2
;
1708 src1
= gen_load_fpr_D(dc
, rs1
);
1709 src2
= gen_load_fpr_D(dc
, rs2
);
1710 src0
= gen_load_fpr_D(dc
, rd
);
1711 dst
= gen_dest_fpr_D(dc
, rd
);
1713 gen(dst
, src0
, src1
, src2
);
1715 gen_store_fpr_D(dc
, rd
, dst
);
1719 static void gen_fop_QQ(DisasContext
*dc
, int rd
, int rs
,
1720 void (*gen
)(TCGv_ptr
))
1722 gen_op_load_fpr_QT1(QFPREG(rs
));
1725 gen_helper_check_ieee_exceptions(cpu_fsr
, tcg_env
);
1727 gen_op_store_QT0_fpr(QFPREG(rd
));
1728 gen_update_fprs_dirty(dc
, QFPREG(rd
));
1731 #ifdef TARGET_SPARC64
1732 static void gen_ne_fop_QQ(DisasContext
*dc
, int rd
, int rs
,
1733 void (*gen
)(TCGv_ptr
))
1735 gen_op_load_fpr_QT1(QFPREG(rs
));
1739 gen_op_store_QT0_fpr(QFPREG(rd
));
1740 gen_update_fprs_dirty(dc
, QFPREG(rd
));
1744 static void gen_fop_QQQ(DisasContext
*dc
, int rd
, int rs1
, int rs2
,
1745 void (*gen
)(TCGv_ptr
))
1747 gen_op_load_fpr_QT0(QFPREG(rs1
));
1748 gen_op_load_fpr_QT1(QFPREG(rs2
));
1751 gen_helper_check_ieee_exceptions(cpu_fsr
, tcg_env
);
1753 gen_op_store_QT0_fpr(QFPREG(rd
));
1754 gen_update_fprs_dirty(dc
, QFPREG(rd
));
1757 static void gen_fop_DFF(DisasContext
*dc
, int rd
, int rs1
, int rs2
,
1758 void (*gen
)(TCGv_i64
, TCGv_ptr
, TCGv_i32
, TCGv_i32
))
1761 TCGv_i32 src1
, src2
;
1763 src1
= gen_load_fpr_F(dc
, rs1
);
1764 src2
= gen_load_fpr_F(dc
, rs2
);
1765 dst
= gen_dest_fpr_D(dc
, rd
);
1767 gen(dst
, tcg_env
, src1
, src2
);
1768 gen_helper_check_ieee_exceptions(cpu_fsr
, tcg_env
);
1770 gen_store_fpr_D(dc
, rd
, dst
);
1773 static void gen_fop_QDD(DisasContext
*dc
, int rd
, int rs1
, int rs2
,
1774 void (*gen
)(TCGv_ptr
, TCGv_i64
, TCGv_i64
))
1776 TCGv_i64 src1
, src2
;
1778 src1
= gen_load_fpr_D(dc
, rs1
);
1779 src2
= gen_load_fpr_D(dc
, rs2
);
1781 gen(tcg_env
, src1
, src2
);
1782 gen_helper_check_ieee_exceptions(cpu_fsr
, tcg_env
);
1784 gen_op_store_QT0_fpr(QFPREG(rd
));
1785 gen_update_fprs_dirty(dc
, QFPREG(rd
));
1788 #ifdef TARGET_SPARC64
1789 static void gen_fop_DF(DisasContext
*dc
, int rd
, int rs
,
1790 void (*gen
)(TCGv_i64
, TCGv_ptr
, TCGv_i32
))
1795 src
= gen_load_fpr_F(dc
, rs
);
1796 dst
= gen_dest_fpr_D(dc
, rd
);
1798 gen(dst
, tcg_env
, src
);
1799 gen_helper_check_ieee_exceptions(cpu_fsr
, tcg_env
);
1801 gen_store_fpr_D(dc
, rd
, dst
);
1805 static void gen_ne_fop_DF(DisasContext
*dc
, int rd
, int rs
,
1806 void (*gen
)(TCGv_i64
, TCGv_ptr
, TCGv_i32
))
1811 src
= gen_load_fpr_F(dc
, rs
);
1812 dst
= gen_dest_fpr_D(dc
, rd
);
1814 gen(dst
, tcg_env
, src
);
1816 gen_store_fpr_D(dc
, rd
, dst
);
1819 static void gen_fop_FD(DisasContext
*dc
, int rd
, int rs
,
1820 void (*gen
)(TCGv_i32
, TCGv_ptr
, TCGv_i64
))
1825 src
= gen_load_fpr_D(dc
, rs
);
1826 dst
= gen_dest_fpr_F(dc
);
1828 gen(dst
, tcg_env
, src
);
1829 gen_helper_check_ieee_exceptions(cpu_fsr
, tcg_env
);
1831 gen_store_fpr_F(dc
, rd
, dst
);
1834 static void gen_fop_FQ(DisasContext
*dc
, int rd
, int rs
,
1835 void (*gen
)(TCGv_i32
, TCGv_ptr
))
1839 gen_op_load_fpr_QT1(QFPREG(rs
));
1840 dst
= gen_dest_fpr_F(dc
);
1843 gen_helper_check_ieee_exceptions(cpu_fsr
, tcg_env
);
1845 gen_store_fpr_F(dc
, rd
, dst
);
1848 static void gen_fop_DQ(DisasContext
*dc
, int rd
, int rs
,
1849 void (*gen
)(TCGv_i64
, TCGv_ptr
))
1853 gen_op_load_fpr_QT1(QFPREG(rs
));
1854 dst
= gen_dest_fpr_D(dc
, rd
);
1857 gen_helper_check_ieee_exceptions(cpu_fsr
, tcg_env
);
1859 gen_store_fpr_D(dc
, rd
, dst
);
1862 static void gen_ne_fop_QF(DisasContext
*dc
, int rd
, int rs
,
1863 void (*gen
)(TCGv_ptr
, TCGv_i32
))
1867 src
= gen_load_fpr_F(dc
, rs
);
1871 gen_op_store_QT0_fpr(QFPREG(rd
));
1872 gen_update_fprs_dirty(dc
, QFPREG(rd
));
1875 static void gen_ne_fop_QD(DisasContext
*dc
, int rd
, int rs
,
1876 void (*gen
)(TCGv_ptr
, TCGv_i64
))
1880 src
= gen_load_fpr_D(dc
, rs
);
1884 gen_op_store_QT0_fpr(QFPREG(rd
));
1885 gen_update_fprs_dirty(dc
, QFPREG(rd
));
static void gen_swap(DisasContext *dc, TCGv dst, TCGv src,
                     TCGv addr, int mmu_idx, MemOp memop)
    gen_address_mask(dc, addr);
    tcg_gen_atomic_xchg_tl(dst, addr, src, mmu_idx, memop | MO_ALIGN);

static void gen_ldstub(DisasContext *dc, TCGv dst, TCGv addr, int mmu_idx)
    TCGv m1 = tcg_constant_tl(0xff);
    gen_address_mask(dc, addr);
    tcg_gen_atomic_xchg_tl(dst, addr, m1, mmu_idx, MO_UB);
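/*
 * Editor's note: LDSTUB atomically reads a byte and sets that byte to
 * 0xff (the classic SPARC spinlock primitive), which maps naturally onto
 * an atomic exchange with the constant 0xff above.
 */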
#if !defined(CONFIG_USER_ONLY) || defined(TARGET_SPARC64)
static DisasASI get_asi(DisasContext *dc, int insn, MemOp memop)
    int asi = GET_FIELD(insn, 19, 26);
    ASIType type = GET_ASI_HELPER;
    int mem_idx = dc->mem_idx;

#ifndef TARGET_SPARC64
    /* Before v9, all asis are immediate and privileged.  */
        gen_exception(dc, TT_ILL_INSN);
        type = GET_ASI_EXCP;
    } else if (supervisor(dc)
               /* Note that LEON accepts ASI_USERDATA in user mode, for
                  use with CASA.  Also note that previous versions of
                  QEMU allowed (and old versions of gcc emitted) ASI_P
                  for LEON, which is incorrect. */
               || (asi == ASI_USERDATA
                   && (dc->def->features & CPU_FEATURE_CASA))) {
        case ASI_USERDATA:    /* User data access */
            mem_idx = MMU_USER_IDX;
            type = GET_ASI_DIRECT;
        case ASI_KERNELDATA:  /* Supervisor data access */
            mem_idx = MMU_KERNEL_IDX;
            type = GET_ASI_DIRECT;
        case ASI_M_BYPASS:    /* MMU passthrough */
        case ASI_LEON_BYPASS: /* LEON MMU passthrough */
            mem_idx = MMU_PHYS_IDX;
            type = GET_ASI_DIRECT;
        case ASI_M_BCOPY: /* Block copy, sta access */
            mem_idx = MMU_KERNEL_IDX;
            type = GET_ASI_BCOPY;
        case ASI_M_BFILL: /* Block fill, stda access */
            mem_idx = MMU_KERNEL_IDX;
            type = GET_ASI_BFILL;

        /* MMU_PHYS_IDX is used when the MMU is disabled to passthrough the
         * permissions check in get_physical_address(..).  */
        mem_idx = (dc->mem_idx == MMU_PHYS_IDX) ? MMU_PHYS_IDX : mem_idx;
        gen_exception(dc, TT_PRIV_INSN);
        type = GET_ASI_EXCP;
    /* With v9, all asis below 0x80 are privileged.  */
    /* ??? We ought to check cpu_has_hypervisor, but we didn't copy
       down that bit into DisasContext.  For the moment that's ok,
       since the direct implementations below don't have any ASIs
       in the restricted [0x30, 0x7f] range, and the check will be
       done properly in the helper.  */
    if (!supervisor(dc) && asi < 0x80) {
        gen_exception(dc, TT_PRIV_ACT);
        type = GET_ASI_EXCP;
        case ASI_REAL:       /* Bypass */
        case ASI_REAL_IO:    /* Bypass, non-cacheable */
        case ASI_REAL_L:     /* Bypass LE */
        case ASI_REAL_IO_L:  /* Bypass, non-cacheable LE */
        case ASI_TWINX_REAL:   /* Real address, twinx */
        case ASI_TWINX_REAL_L: /* Real address, twinx, LE */
        case ASI_QUAD_LDD_PHYS:
        case ASI_QUAD_LDD_PHYS_L:
            mem_idx = MMU_PHYS_IDX;
        case ASI_N:  /* Nucleus */
        case ASI_NL: /* Nucleus LE */
        case ASI_NUCLEUS_QUAD_LDD:
        case ASI_NUCLEUS_QUAD_LDD_L:
            if (hypervisor(dc)) {
                mem_idx = MMU_PHYS_IDX;
                mem_idx = MMU_NUCLEUS_IDX;
        case ASI_AIUP:  /* As if user primary */
        case ASI_AIUPL: /* As if user primary LE */
        case ASI_TWINX_AIUP:
        case ASI_TWINX_AIUP_L:
        case ASI_BLK_AIUP_4V:
        case ASI_BLK_AIUP_L_4V:
            mem_idx = MMU_USER_IDX;
        case ASI_AIUS:  /* As if user secondary */
        case ASI_AIUSL: /* As if user secondary LE */
        case ASI_TWINX_AIUS:
        case ASI_TWINX_AIUS_L:
        case ASI_BLK_AIUS_4V:
        case ASI_BLK_AIUS_L_4V:
            mem_idx = MMU_USER_SECONDARY_IDX;
        case ASI_S:  /* Secondary */
        case ASI_SL: /* Secondary LE */
        case ASI_BLK_COMMIT_S:
            if (mem_idx == MMU_USER_IDX) {
                mem_idx = MMU_USER_SECONDARY_IDX;
            } else if (mem_idx == MMU_KERNEL_IDX) {
                mem_idx = MMU_KERNEL_SECONDARY_IDX;
        case ASI_P:  /* Primary */
        case ASI_PL: /* Primary LE */
        case ASI_BLK_COMMIT_P:
        type = GET_ASI_DIRECT;
        case ASI_TWINX_REAL:
        case ASI_TWINX_REAL_L:
        case ASI_TWINX_AIUP:
        case ASI_TWINX_AIUP_L:
        case ASI_TWINX_AIUS:
        case ASI_TWINX_AIUS_L:
        case ASI_QUAD_LDD_PHYS:
        case ASI_QUAD_LDD_PHYS_L:
        case ASI_NUCLEUS_QUAD_LDD:
        case ASI_NUCLEUS_QUAD_LDD_L:
            type = GET_ASI_DTWINX;
        case ASI_BLK_COMMIT_P:
        case ASI_BLK_COMMIT_S:
        case ASI_BLK_AIUP_4V:
        case ASI_BLK_AIUP_L_4V:
        case ASI_BLK_AIUS_4V:
        case ASI_BLK_AIUS_L_4V:
            type = GET_ASI_BLOCK;
            type = GET_ASI_SHORT;
            type = GET_ASI_SHORT;
    /* The little-endian asis all have bit 3 set.  */

    return (DisasASI){ type, asi, mem_idx, memop };
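/*
 * Editor's note: the DisasASI returned here is a small decoded plan for
 * the access -- the dispatch category (direct, twinx, block, helper,
 * excp, ...), the raw ASI number, the MMU index to use, and the MemOp
 * carrying size and endianness -- which the gen_*_asi() routines below
 * switch on.
 */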
static void gen_ld_asi(DisasContext *dc, TCGv dst, TCGv addr,
                       int insn, MemOp memop)
    DisasASI da = get_asi(dc, insn, memop);

    case GET_ASI_DTWINX: /* Reserved for ldda.  */
        gen_exception(dc, TT_ILL_INSN);
    case GET_ASI_DIRECT:
        gen_address_mask(dc, addr);
        tcg_gen_qemu_ld_tl(dst, addr, da.mem_idx, da.memop | MO_ALIGN);

        TCGv_i32 r_asi = tcg_constant_i32(da.asi);
        TCGv_i32 r_mop = tcg_constant_i32(memop | MO_ALIGN);

#ifdef TARGET_SPARC64
        gen_helper_ld_asi(dst, tcg_env, addr, r_asi, r_mop);
        TCGv_i64 t64 = tcg_temp_new_i64();
        gen_helper_ld_asi(t64, tcg_env, addr, r_asi, r_mop);
        tcg_gen_trunc_i64_tl(dst, t64);
static void gen_st_asi(DisasContext *dc, TCGv src, TCGv addr,
                       int insn, MemOp memop)
    DisasASI da = get_asi(dc, insn, memop);

    case GET_ASI_DTWINX: /* Reserved for stda.  */
#ifndef TARGET_SPARC64
        gen_exception(dc, TT_ILL_INSN);
        if (!(dc->def->features & CPU_FEATURE_HYPV)) {
            /* Pre OpenSPARC CPUs don't have these */
            gen_exception(dc, TT_ILL_INSN);
        /* in OpenSPARC T1+ CPUs TWINX ASIs in store instructions
         * are ST_BLKINIT_ ASIs */
    case GET_ASI_DIRECT:
        gen_address_mask(dc, addr);
        tcg_gen_qemu_st_tl(src, addr, da.mem_idx, da.memop | MO_ALIGN);
#if !defined(TARGET_SPARC64) && !defined(CONFIG_USER_ONLY)
        /* Copy 32 bytes from the address in SRC to ADDR.  */
        /* ??? The original qemu code suggests 4-byte alignment, dropping
           the low bits, but the only place I can see this used is in the
           Linux kernel with 32 byte alignment, which would make more sense
           as a cacheline-style operation.  */
            TCGv saddr = tcg_temp_new();
            TCGv daddr = tcg_temp_new();
            TCGv four = tcg_constant_tl(4);
            TCGv_i32 tmp = tcg_temp_new_i32();

            tcg_gen_andi_tl(saddr, src, -4);
            tcg_gen_andi_tl(daddr, addr, -4);
            for (i = 0; i < 32; i += 4) {
                /* Since the loads and stores are paired, allow the
                   copy to happen in the host endianness.  */
                tcg_gen_qemu_ld_i32(tmp, saddr, da.mem_idx, MO_UL);
                tcg_gen_qemu_st_i32(tmp, daddr, da.mem_idx, MO_UL);
                tcg_gen_add_tl(saddr, saddr, four);
                tcg_gen_add_tl(daddr, daddr, four);

            TCGv_i32 r_asi = tcg_constant_i32(da.asi);
            TCGv_i32 r_mop = tcg_constant_i32(memop | MO_ALIGN);

#ifdef TARGET_SPARC64
            gen_helper_st_asi(tcg_env, addr, src, r_asi, r_mop);
            TCGv_i64 t64 = tcg_temp_new_i64();
            tcg_gen_extu_tl_i64(t64, src);
            gen_helper_st_asi(tcg_env, addr, t64, r_asi, r_mop);

        /* A write to a TLB register may alter page maps.  End the TB. */
        dc->npc = DYNAMIC_PC;
2246 static void gen_swap_asi(DisasContext
*dc
, TCGv dst
, TCGv src
,
2247 TCGv addr
, int insn
)
2249 DisasASI da
= get_asi(dc
, insn
, MO_TEUL
);
2254 case GET_ASI_DIRECT
:
2255 gen_swap(dc
, dst
, src
, addr
, da
.mem_idx
, da
.memop
);
2258 /* ??? Should be DAE_invalid_asi. */
2259 gen_exception(dc
, TT_DATA_ACCESS
);
2264 static void gen_cas_asi(DisasContext
*dc
, TCGv addr
, TCGv cmpv
,
2267 DisasASI da
= get_asi(dc
, insn
, MO_TEUL
);
2273 case GET_ASI_DIRECT
:
2274 oldv
= tcg_temp_new();
2275 tcg_gen_atomic_cmpxchg_tl(oldv
, addr
, cmpv
, gen_load_gpr(dc
, rd
),
2276 da
.mem_idx
, da
.memop
| MO_ALIGN
);
2277 gen_store_gpr(dc
, rd
, oldv
);
2280 /* ??? Should be DAE_invalid_asi. */
2281 gen_exception(dc
, TT_DATA_ACCESS
);
2286 static void gen_ldstub_asi(DisasContext
*dc
, TCGv dst
, TCGv addr
, int insn
)
2288 DisasASI da
= get_asi(dc
, insn
, MO_UB
);
2293 case GET_ASI_DIRECT
:
2294 gen_ldstub(dc
, dst
, addr
, da
.mem_idx
);
2297 /* ??? In theory, this should be raise DAE_invalid_asi.
2298 But the SS-20 roms do ldstuba [%l0] #ASI_M_CTL, %o1. */
2299 if (tb_cflags(dc
->base
.tb
) & CF_PARALLEL
) {
2300 gen_helper_exit_atomic(tcg_env
);
2302 TCGv_i32 r_asi
= tcg_constant_i32(da
.asi
);
2303 TCGv_i32 r_mop
= tcg_constant_i32(MO_UB
);
2307 t64
= tcg_temp_new_i64();
2308 gen_helper_ld_asi(t64
, tcg_env
, addr
, r_asi
, r_mop
);
2310 s64
= tcg_constant_i64(0xff);
2311 gen_helper_st_asi(tcg_env
, addr
, s64
, r_asi
, r_mop
);
2313 tcg_gen_trunc_i64_tl(dst
, t64
);
2316 dc
->npc
= DYNAMIC_PC
;
2323 #ifdef TARGET_SPARC64
2324 static void gen_ldf_asi(DisasContext
*dc
, TCGv addr
,
2325 int insn
, int size
, int rd
)
2327 DisasASI da
= get_asi(dc
, insn
, (size
== 4 ? MO_TEUL
: MO_TEUQ
));
2335 case GET_ASI_DIRECT
:
2336 gen_address_mask(dc
, addr
);
2339 d32
= gen_dest_fpr_F(dc
);
2340 tcg_gen_qemu_ld_i32(d32
, addr
, da
.mem_idx
, da
.memop
| MO_ALIGN
);
2341 gen_store_fpr_F(dc
, rd
, d32
);
2344 tcg_gen_qemu_ld_i64(cpu_fpr
[rd
/ 2], addr
, da
.mem_idx
,
2345 da
.memop
| MO_ALIGN_4
);
2348 d64
= tcg_temp_new_i64();
2349 tcg_gen_qemu_ld_i64(d64
, addr
, da
.mem_idx
, da
.memop
| MO_ALIGN_4
);
2350 tcg_gen_addi_tl(addr
, addr
, 8);
2351 tcg_gen_qemu_ld_i64(cpu_fpr
[rd
/2+1], addr
, da
.mem_idx
,
2352 da
.memop
| MO_ALIGN_4
);
2353 tcg_gen_mov_i64(cpu_fpr
[rd
/ 2], d64
);
2356 g_assert_not_reached();
2361 /* Valid for lddfa on aligned registers only. */
2362 if (size
== 8 && (rd
& 7) == 0) {
2367 gen_address_mask(dc
, addr
);
2369 /* The first operation checks required alignment. */
2370 memop
= da
.memop
| MO_ALIGN_64
;
2371 eight
= tcg_constant_tl(8);
2372 for (i
= 0; ; ++i
) {
2373 tcg_gen_qemu_ld_i64(cpu_fpr
[rd
/ 2 + i
], addr
,
2378 tcg_gen_add_tl(addr
, addr
, eight
);
2382 gen_exception(dc
, TT_ILL_INSN
);
2387 /* Valid for lddfa only. */
2389 gen_address_mask(dc
, addr
);
2390 tcg_gen_qemu_ld_i64(cpu_fpr
[rd
/ 2], addr
, da
.mem_idx
,
2391 da
.memop
| MO_ALIGN
);
2393 gen_exception(dc
, TT_ILL_INSN
);
2399 TCGv_i32 r_asi
= tcg_constant_i32(da
.asi
);
2400 TCGv_i32 r_mop
= tcg_constant_i32(da
.memop
| MO_ALIGN
);
2403 /* According to the table in the UA2011 manual, the only
2404 other asis that are valid for ldfa/lddfa/ldqfa are
2405 the NO_FAULT asis. We still need a helper for these,
2406 but we can just use the integer asi helper for them. */
2409 d64
= tcg_temp_new_i64();
2410 gen_helper_ld_asi(d64
, tcg_env
, addr
, r_asi
, r_mop
);
2411 d32
= gen_dest_fpr_F(dc
);
2412 tcg_gen_extrl_i64_i32(d32
, d64
);
2413 gen_store_fpr_F(dc
, rd
, d32
);
2416 gen_helper_ld_asi(cpu_fpr
[rd
/ 2], tcg_env
, addr
, r_asi
, r_mop
);
2419 d64
= tcg_temp_new_i64();
2420 gen_helper_ld_asi(d64
, tcg_env
, addr
, r_asi
, r_mop
);
2421 tcg_gen_addi_tl(addr
, addr
, 8);
2422 gen_helper_ld_asi(cpu_fpr
[rd
/2+1], tcg_env
, addr
, r_asi
, r_mop
);
2423 tcg_gen_mov_i64(cpu_fpr
[rd
/ 2], d64
);
2426 g_assert_not_reached();
2433 static void gen_stf_asi(DisasContext
*dc
, TCGv addr
,
2434 int insn
, int size
, int rd
)
2436 DisasASI da
= get_asi(dc
, insn
, (size
== 4 ? MO_TEUL
: MO_TEUQ
));
2443 case GET_ASI_DIRECT
:
2444 gen_address_mask(dc
, addr
);
2447 d32
= gen_load_fpr_F(dc
, rd
);
2448 tcg_gen_qemu_st_i32(d32
, addr
, da
.mem_idx
, da
.memop
| MO_ALIGN
);
2451 tcg_gen_qemu_st_i64(cpu_fpr
[rd
/ 2], addr
, da
.mem_idx
,
2452 da
.memop
| MO_ALIGN_4
);
2455 /* Only 4-byte alignment required. However, it is legal for the
2456 cpu to signal the alignment fault, and the OS trap handler is
2457 required to fix it up. Requiring 16-byte alignment here avoids
2458 having to probe the second page before performing the first
2460 tcg_gen_qemu_st_i64(cpu_fpr
[rd
/ 2], addr
, da
.mem_idx
,
2461 da
.memop
| MO_ALIGN_16
);
2462 tcg_gen_addi_tl(addr
, addr
, 8);
2463 tcg_gen_qemu_st_i64(cpu_fpr
[rd
/2+1], addr
, da
.mem_idx
, da
.memop
);
2466 g_assert_not_reached();
2471 /* Valid for stdfa on aligned registers only. */
2472 if (size
== 8 && (rd
& 7) == 0) {
2477 gen_address_mask(dc
, addr
);
2479 /* The first operation checks required alignment. */
2480 memop
= da
.memop
| MO_ALIGN_64
;
2481 eight
= tcg_constant_tl(8);
2482 for (i
= 0; ; ++i
) {
2483 tcg_gen_qemu_st_i64(cpu_fpr
[rd
/ 2 + i
], addr
,
2488 tcg_gen_add_tl(addr
, addr
, eight
);
2492 gen_exception(dc
, TT_ILL_INSN
);
2497 /* Valid for stdfa only. */
2499 gen_address_mask(dc
, addr
);
2500 tcg_gen_qemu_st_i64(cpu_fpr
[rd
/ 2], addr
, da
.mem_idx
,
2501 da
.memop
| MO_ALIGN
);
2503 gen_exception(dc
, TT_ILL_INSN
);
2508 /* According to the table in the UA2011 manual, the only
2509 other asis that are valid for ldfa/lddfa/ldqfa are
2510 the PST* asis, which aren't currently handled. */
2511 gen_exception(dc
, TT_ILL_INSN
);
2516 static void gen_ldda_asi(DisasContext
*dc
, TCGv addr
, int insn
, int rd
)
2518 DisasASI da
= get_asi(dc
, insn
, MO_TEUQ
);
2519 TCGv_i64 hi
= gen_dest_gpr(dc
, rd
);
2520 TCGv_i64 lo
= gen_dest_gpr(dc
, rd
+ 1);
2526 case GET_ASI_DTWINX
:
2527 gen_address_mask(dc
, addr
);
2528 tcg_gen_qemu_ld_i64(hi
, addr
, da
.mem_idx
, da
.memop
| MO_ALIGN_16
);
2529 tcg_gen_addi_tl(addr
, addr
, 8);
2530 tcg_gen_qemu_ld_i64(lo
, addr
, da
.mem_idx
, da
.memop
);
2533 case GET_ASI_DIRECT
:
2535 TCGv_i64 tmp
= tcg_temp_new_i64();
2537 gen_address_mask(dc
, addr
);
2538 tcg_gen_qemu_ld_i64(tmp
, addr
, da
.mem_idx
, da
.memop
| MO_ALIGN
);
2540 /* Note that LE ldda acts as if each 32-bit register
2541 result is byte swapped. Having just performed one
2542 64-bit bswap, we need now to swap the writebacks. */
2543 if ((da
.memop
& MO_BSWAP
) == MO_TE
) {
2544 tcg_gen_extr32_i64(lo
, hi
, tmp
);
2546 tcg_gen_extr32_i64(hi
, lo
, tmp
);
2552 /* ??? In theory we've handled all of the ASIs that are valid
2553 for ldda, and this should raise DAE_invalid_asi. However,
2554 real hardware allows others. This can be seen with e.g.
2555 FreeBSD 10.3 wrt ASI_IC_TAG. */
2557 TCGv_i32 r_asi
= tcg_constant_i32(da
.asi
);
2558 TCGv_i32 r_mop
= tcg_constant_i32(da
.memop
);
2559 TCGv_i64 tmp
= tcg_temp_new_i64();
2562 gen_helper_ld_asi(tmp
, tcg_env
, addr
, r_asi
, r_mop
);
2565 if ((da
.memop
& MO_BSWAP
) == MO_TE
) {
2566 tcg_gen_extr32_i64(lo
, hi
, tmp
);
2568 tcg_gen_extr32_i64(hi
, lo
, tmp
);
2574 gen_store_gpr(dc
, rd
, hi
);
2575 gen_store_gpr(dc
, rd
+ 1, lo
);
static void gen_stda_asi(DisasContext *dc, TCGv hi, TCGv addr,
                         int insn, int rd)
{
    DisasASI da = get_asi(dc, insn, MO_TEUQ);
    TCGv lo = gen_load_gpr(dc, rd + 1);

    switch (da.type) {
    case GET_ASI_DTWINX:
        gen_address_mask(dc, addr);
        tcg_gen_qemu_st_i64(hi, addr, da.mem_idx, da.memop | MO_ALIGN_16);
        tcg_gen_addi_tl(addr, addr, 8);
        tcg_gen_qemu_st_i64(lo, addr, da.mem_idx, da.memop);
        break;

    case GET_ASI_DIRECT:
        {
            TCGv_i64 t64 = tcg_temp_new_i64();

            /* Note that LE stda acts as if each 32-bit register result is
               byte swapped.  We will perform one 64-bit LE store, so now
               we must swap the order of the construction. */
            if ((da.memop & MO_BSWAP) == MO_TE) {
                tcg_gen_concat32_i64(t64, lo, hi);
            } else {
                tcg_gen_concat32_i64(t64, hi, lo);
            }
            gen_address_mask(dc, addr);
            tcg_gen_qemu_st_i64(t64, addr, da.mem_idx, da.memop | MO_ALIGN);
        }
        break;

    default:
        /* ??? In theory we've handled all of the ASIs that are valid
           for stda, and this should raise DAE_invalid_asi. */
        {
            TCGv_i32 r_asi = tcg_constant_i32(da.asi);
            TCGv_i32 r_mop = tcg_constant_i32(da.memop);
            TCGv_i64 t64 = tcg_temp_new_i64();

            if ((da.memop & MO_BSWAP) == MO_TE) {
                tcg_gen_concat32_i64(t64, lo, hi);
            } else {
                tcg_gen_concat32_i64(t64, hi, lo);
            }
            gen_helper_st_asi(tcg_env, addr, t64, r_asi, r_mop);
        }
        break;
    }
}
static void gen_casx_asi(DisasContext *dc, TCGv addr, TCGv cmpv,
                         int insn, int rd)
{
    DisasASI da = get_asi(dc, insn, MO_TEUQ);
    TCGv oldv;

    switch (da.type) {
    case GET_ASI_DIRECT:
        oldv = tcg_temp_new();
        tcg_gen_atomic_cmpxchg_tl(oldv, addr, cmpv, gen_load_gpr(dc, rd),
                                  da.mem_idx, da.memop | MO_ALIGN);
        gen_store_gpr(dc, rd, oldv);
        break;
    default:
        /* ??? Should be DAE_invalid_asi. */
        gen_exception(dc, TT_DATA_ACCESS);
        break;
    }
}
#elif !defined(CONFIG_USER_ONLY)
static void gen_ldda_asi(DisasContext *dc, TCGv addr, int insn, int rd)
{
    /* ??? Work around an apparent bug in Ubuntu gcc 4.8.2-10ubuntu2+12,
       whereby "rd + 1" elicits "error: array subscript is above array".
       Since we have already asserted that rd is even, the semantics
       are unchanged. */
    TCGv lo = gen_dest_gpr(dc, rd | 1);
    TCGv hi = gen_dest_gpr(dc, rd);
    TCGv_i64 t64 = tcg_temp_new_i64();
    DisasASI da = get_asi(dc, insn, MO_TEUQ);

    switch (da.type) {
    case GET_ASI_DIRECT:
        gen_address_mask(dc, addr);
        tcg_gen_qemu_ld_i64(t64, addr, da.mem_idx, da.memop | MO_ALIGN);
        break;
    default:
        {
            TCGv_i32 r_asi = tcg_constant_i32(da.asi);
            TCGv_i32 r_mop = tcg_constant_i32(MO_UQ);

            gen_helper_ld_asi(t64, tcg_env, addr, r_asi, r_mop);
        }
        break;
    }

    tcg_gen_extr_i64_i32(lo, hi, t64);
    gen_store_gpr(dc, rd | 1, lo);
    gen_store_gpr(dc, rd, hi);
}
static void gen_stda_asi(DisasContext *dc, TCGv hi, TCGv addr,
                         int insn, int rd)
{
    DisasASI da = get_asi(dc, insn, MO_TEUQ);
    TCGv lo = gen_load_gpr(dc, rd + 1);
    TCGv_i64 t64 = tcg_temp_new_i64();

    tcg_gen_concat_tl_i64(t64, lo, hi);

    switch (da.type) {
    case GET_ASI_DIRECT:
        gen_address_mask(dc, addr);
        tcg_gen_qemu_st_i64(t64, addr, da.mem_idx, da.memop | MO_ALIGN);
        break;

    case GET_ASI_BFILL:
        /* Store 32 bytes of T64 to ADDR. */
        /* ??? The original qemu code suggests 8-byte alignment, dropping
           the low bits, but the only place I can see this used is in the
           Linux kernel with 32 byte alignment, which would make more sense
           as a cacheline-style operation. */
        {
            TCGv d_addr = tcg_temp_new();
            TCGv eight = tcg_constant_tl(8);
            int i;

            tcg_gen_andi_tl(d_addr, addr, -8);
            for (i = 0; i < 32; i += 8) {
                tcg_gen_qemu_st_i64(t64, d_addr, da.mem_idx, da.memop);
                tcg_gen_add_tl(d_addr, d_addr, eight);
            }
        }
        break;

    default:
        {
            TCGv_i32 r_asi = tcg_constant_i32(da.asi);
            TCGv_i32 r_mop = tcg_constant_i32(MO_UQ);

            gen_helper_st_asi(tcg_env, addr, t64, r_asi, r_mop);
        }
        break;
    }
}
static TCGv get_src1(DisasContext *dc, unsigned int insn)
{
    unsigned int rs1 = GET_FIELD(insn, 13, 17);
    return gen_load_gpr(dc, rs1);
}
#ifdef TARGET_SPARC64
static void gen_fmovs(DisasContext *dc, DisasCompare *cmp, int rd, int rs)
{
    TCGv_i32 c32, zero, dst, s1, s2;

    /* We have two choices here: extend the 32 bit data and use movcond_i64,
       or fold the comparison down to 32 bits and use movcond_i32.  Choose
       the latter. */
    c32 = tcg_temp_new_i32();
    if (cmp->is_bool) {
        tcg_gen_extrl_i64_i32(c32, cmp->c1);
    } else {
        TCGv_i64 c64 = tcg_temp_new_i64();
        tcg_gen_setcond_i64(cmp->cond, c64, cmp->c1, cmp->c2);
        tcg_gen_extrl_i64_i32(c32, c64);
    }

    s1 = gen_load_fpr_F(dc, rs);
    s2 = gen_load_fpr_F(dc, rd);
    dst = gen_dest_fpr_F(dc);
    zero = tcg_constant_i32(0);

    tcg_gen_movcond_i32(TCG_COND_NE, dst, c32, zero, s1, s2);

    gen_store_fpr_F(dc, rd, dst);
}

static void gen_fmovd(DisasContext *dc, DisasCompare *cmp, int rd, int rs)
{
    TCGv_i64 dst = gen_dest_fpr_D(dc, rd);
    tcg_gen_movcond_i64(cmp->cond, dst, cmp->c1, cmp->c2,
                        gen_load_fpr_D(dc, rs),
                        gen_load_fpr_D(dc, rd));
    gen_store_fpr_D(dc, rd, dst);
}

static void gen_fmovq(DisasContext *dc, DisasCompare *cmp, int rd, int rs)
{
    int qd = QFPREG(rd);
    int qs = QFPREG(rs);

    tcg_gen_movcond_i64(cmp->cond, cpu_fpr[qd / 2], cmp->c1, cmp->c2,
                        cpu_fpr[qs / 2], cpu_fpr[qd / 2]);
    tcg_gen_movcond_i64(cmp->cond, cpu_fpr[qd / 2 + 1], cmp->c1, cmp->c2,
                        cpu_fpr[qs / 2 + 1], cpu_fpr[qd / 2 + 1]);

    gen_update_fprs_dirty(dc, qd);
}
static void gen_load_trap_state_at_tl(TCGv_ptr r_tsptr)
{
    TCGv_i32 r_tl = tcg_temp_new_i32();

    /* load env->tl into r_tl */
    tcg_gen_ld_i32(r_tl, tcg_env, offsetof(CPUSPARCState, tl));

    /* tl = [0 ... MAXTL_MASK] where MAXTL_MASK must be power of 2 */
    tcg_gen_andi_i32(r_tl, r_tl, MAXTL_MASK);

    /* calculate offset to current trap state from env->ts, reuse r_tl */
    tcg_gen_muli_i32(r_tl, r_tl, sizeof (trap_state));
    tcg_gen_addi_ptr(r_tsptr, tcg_env, offsetof(CPUSPARCState, ts));

    /* tsptr = env->ts[env->tl & MAXTL_MASK] */
    {
        TCGv_ptr r_tl_tmp = tcg_temp_new_ptr();
        tcg_gen_ext_i32_ptr(r_tl_tmp, r_tl);
        tcg_gen_add_ptr(r_tsptr, r_tsptr, r_tl_tmp);
    }
}
static void gen_edge(DisasContext *dc, TCGv dst, TCGv s1, TCGv s2,
                     int width, bool cc, bool left)
{
    uint64_t amask, tabl, tabr;
    int shift, imask, omask;
    TCGv lo1, lo2;

    if (cc) {
        tcg_gen_mov_tl(cpu_cc_src, s1);
        tcg_gen_mov_tl(cpu_cc_src2, s2);
        tcg_gen_sub_tl(cpu_cc_dst, s1, s2);
        tcg_gen_movi_i32(cpu_cc_op, CC_OP_SUB);
        dc->cc_op = CC_OP_SUB;
    }

    /* Theory of operation: there are two tables, left and right (not to
       be confused with the left and right versions of the opcode).  These
       are indexed by the low 3 bits of the inputs.  To make things "easy",
       these tables are loaded into two constants, TABL and TABR below.
       The operation index = (input & imask) << shift calculates the index
       into the constant, while val = (table >> index) & omask calculates
       the value we're looking for.  A short worked example follows this
       function. */
    switch (width) {
    case 8:
        if (left) {
            tabl = 0x80c0e0f0f8fcfeffULL;
            tabr = 0xff7f3f1f0f070301ULL;
        } else {
            tabl = 0x0103070f1f3f7fffULL;
            tabr = 0xfffefcf8f0e0c080ULL;
        }
        break;
    case 32:
        if (left) {
            tabl = (2 << 2) | 3;
            tabr = (3 << 2) | 1;
        } else {
            tabl = (1 << 2) | 3;
            tabr = (3 << 2) | 2;
        }
        break;
    }

    lo1 = tcg_temp_new();
    lo2 = tcg_temp_new();
    tcg_gen_andi_tl(lo1, s1, imask);
    tcg_gen_andi_tl(lo2, s2, imask);
    tcg_gen_shli_tl(lo1, lo1, shift);
    tcg_gen_shli_tl(lo2, lo2, shift);

    tcg_gen_shr_tl(lo1, tcg_constant_tl(tabl), lo1);
    tcg_gen_shr_tl(lo2, tcg_constant_tl(tabr), lo2);
    tcg_gen_andi_tl(lo1, lo1, omask);
    tcg_gen_andi_tl(lo2, lo2, omask);

    amask &= 0xffffffffULL;
    tcg_gen_andi_tl(s1, s1, amask);
    tcg_gen_andi_tl(s2, s2, amask);

    /* Compute dst = (s1 == s2 ? lo1 : lo1 & lo2). */
    tcg_gen_and_tl(lo2, lo2, lo1);
    tcg_gen_movcond_tl(TCG_COND_EQ, dst, s1, s2, lo1, lo2);
}
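
/*
 * Illustrative worked example for gen_edge() above (not from the original
 * source; the shift and omask values for the 8-bit case are assumed to be
 * 3 and 0xff).  If the low three bits of s1 are 5, the index is 5 << 3 = 40
 * and lo1 = (0x80c0e0f0f8fcfeffULL >> 40) & 0xff = 0xe0; lo2 is obtained the
 * same way from tabr.  The final movcond then returns lo1 when the
 * amask-aligned addresses compare equal, and lo1 & lo2 otherwise, exactly as
 * the "dst = (s1 == s2 ? lo1 : lo1 & lo2)" comment describes.
 */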
static void gen_alignaddr(TCGv dst, TCGv s1, TCGv s2, bool left)
{
    TCGv tmp = tcg_temp_new();

    tcg_gen_add_tl(tmp, s1, s2);
    tcg_gen_andi_tl(dst, tmp, -8);
    if (left) {
        tcg_gen_neg_tl(tmp, tmp);
    }
    tcg_gen_deposit_tl(cpu_gsr, cpu_gsr, tmp, 0, 3);
}

static void gen_faligndata(TCGv dst, TCGv gsr, TCGv s1, TCGv s2)
{
    TCGv t1, t2, shift;

    t1 = tcg_temp_new();
    t2 = tcg_temp_new();
    shift = tcg_temp_new();

    tcg_gen_andi_tl(shift, gsr, 7);
    tcg_gen_shli_tl(shift, shift, 3);
    tcg_gen_shl_tl(t1, s1, shift);

    /* A shift of 64 does not produce 0 in TCG.  Divide this into a
       shift of (up to 63) followed by a constant shift of 1.
       (See the note after this function.) */
    tcg_gen_xori_tl(shift, shift, 63);
    tcg_gen_shr_tl(t2, s2, shift);
    tcg_gen_shri_tl(t2, t2, 1);

    tcg_gen_or_tl(dst, t1, t2);
}
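
/*
 * Illustrative note for gen_faligndata() above (not from the original
 * source): the xori/shr/shri sequence computes s2 >> (64 - GSR.align * 8)
 * without ever emitting a TCG shift by 64.  For GSR.align = 3, shift = 24,
 * so shift ^ 63 = 39 and (s2 >> 39) >> 1 == s2 >> 40 == s2 >> (64 - 24).
 * For GSR.align = 0 the same sequence yields (s2 >> 63) >> 1 == 0, which is
 * the value a true 64-bit shift would need to produce.
 */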
/* Include the auto-generated decoder. */
#include "decode-insns.c.inc"

#define TRANS(NAME, AVAIL, FUNC, ...) \
    static bool trans_##NAME(DisasContext *dc, arg_##NAME *a) \
    { return avail_##AVAIL(dc) && FUNC(dc, __VA_ARGS__); }

#define avail_ALL(C)      true
#ifdef TARGET_SPARC64
# define avail_32(C)      false
# define avail_ASR17(C)   false
# define avail_DIV(C)     true
# define avail_MUL(C)     true
# define avail_POWERDOWN(C) false
# define avail_64(C)      true
# define avail_GL(C)      ((C)->def->features & CPU_FEATURE_GL)
# define avail_HYPV(C)    ((C)->def->features & CPU_FEATURE_HYPV)
#else
# define avail_32(C)      true
# define avail_ASR17(C)   ((C)->def->features & CPU_FEATURE_ASR17)
# define avail_DIV(C)     ((C)->def->features & CPU_FEATURE_DIV)
# define avail_MUL(C)     ((C)->def->features & CPU_FEATURE_MUL)
# define avail_POWERDOWN(C) ((C)->def->features & CPU_FEATURE_POWERDOWN)
# define avail_64(C)      false
# define avail_GL(C)      false
# define avail_HYPV(C)    false
#endif
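
/*
 * Illustrative expansion (not from the original source): a later use such as
 *     TRANS(RDCCR, 64, do_rd_special, true, a->rd, do_rdccr)
 * becomes, after preprocessing, roughly
 *     static bool trans_RDCCR(DisasContext *dc, arg_RDCCR *a)
 *     { return avail_64(dc) && do_rd_special(dc, true, a->rd, do_rdccr); }
 * so the decodetree-generated decoder calls trans_RDCCR() and the feature
 * gate is evaluated before the translation body runs.
 */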
/* Default case for non jump instructions. */
static bool advance_pc(DisasContext *dc)
{
    if (dc->npc & 3) {
        switch (dc->npc) {
        case DYNAMIC_PC:
        case DYNAMIC_PC_LOOKUP:
            break;
        case JUMP_PC:
            /* we can do a static jump */
            gen_branch2(dc, dc->jump_pc[0], dc->jump_pc[1], cpu_cond);
            dc->base.is_jmp = DISAS_NORETURN;
            break;
        default:
            g_assert_not_reached();
        }
    } else {
        dc->pc = dc->npc;
        dc->npc = dc->npc + 4;
    }
    return true;
}
/*
 * Major opcodes 00 and 01 -- branches, call, and sethi
 */

static bool advance_jump_uncond_never(DisasContext *dc, bool annul)
{
    if (annul) {
        dc->pc = dc->npc + 4;
        dc->npc = dc->pc + 4;
    } else {
        dc->pc = dc->npc;
        dc->npc = dc->pc + 4;
    }
    return true;
}

static bool advance_jump_uncond_always(DisasContext *dc, bool annul,
                                       target_ulong dest)
{
    if (annul) {
        dc->pc = dest;
        dc->npc = dest + 4;
    } else {
        dc->pc = dc->npc;
        dc->npc = dest;
        tcg_gen_mov_tl(cpu_pc, cpu_npc);
    }
    return true;
}

static bool advance_jump_cond(DisasContext *dc, DisasCompare *cmp,
                              bool annul, target_ulong dest)
{
    target_ulong npc = dc->npc;

    if (annul) {
        TCGLabel *l1 = gen_new_label();

        tcg_gen_brcond_tl(tcg_invert_cond(cmp->cond), cmp->c1, cmp->c2, l1);
        gen_goto_tb(dc, 0, npc, dest);
        gen_set_label(l1);
        gen_goto_tb(dc, 1, npc + 4, npc + 8);

        dc->base.is_jmp = DISAS_NORETURN;
    } else if (npc & 3) {
        switch (npc) {
        case DYNAMIC_PC:
        case DYNAMIC_PC_LOOKUP:
            tcg_gen_mov_tl(cpu_pc, cpu_npc);
            tcg_gen_addi_tl(cpu_npc, cpu_npc, 4);
            tcg_gen_movcond_tl(cmp->cond, cpu_npc, cmp->c1, cmp->c2,
                               tcg_constant_tl(dest), cpu_npc);
            break;
        default:
            g_assert_not_reached();
        }
    } else {
        dc->jump_pc[0] = dest;
        dc->jump_pc[1] = npc + 4;
        if (cmp->is_bool) {
            tcg_gen_mov_tl(cpu_cond, cmp->c1);
        } else {
            tcg_gen_setcond_tl(cmp->cond, cpu_cond, cmp->c1, cmp->c2);
        }
    }
    return true;
}
static bool raise_priv(DisasContext *dc)
{
    gen_exception(dc, TT_PRIV_INSN);
    return true;
}

static bool do_bpcc(DisasContext *dc, arg_bcc *a)
{
    target_long target = address_mask_i(dc, dc->pc + a->i * 4);
    DisasCompare cmp;

    switch (a->cond) {
    case 0x0:
        return advance_jump_uncond_never(dc, a->a);
    case 0x8:
        return advance_jump_uncond_always(dc, a->a, target);
    default:
        gen_compare(&cmp, a->cc, a->cond, dc);
        return advance_jump_cond(dc, &cmp, a->a, target);
    }
}

TRANS(Bicc, ALL, do_bpcc, a)
TRANS(BPcc, 64, do_bpcc, a)

static bool do_fbpfcc(DisasContext *dc, arg_bcc *a)
{
    target_long target = address_mask_i(dc, dc->pc + a->i * 4);
    DisasCompare cmp;

    if (gen_trap_ifnofpu(dc)) {
        return true;
    }
    switch (a->cond) {
    case 0x0:
        return advance_jump_uncond_never(dc, a->a);
    case 0x8:
        return advance_jump_uncond_always(dc, a->a, target);
    default:
        gen_fcompare(&cmp, a->cc, a->cond);
        return advance_jump_cond(dc, &cmp, a->a, target);
    }
}

TRANS(FBPfcc, 64, do_fbpfcc, a)
TRANS(FBfcc, ALL, do_fbpfcc, a)
static bool trans_BPr(DisasContext *dc, arg_BPr *a)
{
    target_long target = address_mask_i(dc, dc->pc + a->i * 4);
    DisasCompare cmp;

    if (!avail_64(dc)) {
        return false;
    }
    if (gen_tcg_cond_reg[a->cond] == TCG_COND_NEVER) {
        return false;
    }

    gen_compare_reg(&cmp, a->cond, gen_load_gpr(dc, a->rs1));
    return advance_jump_cond(dc, &cmp, a->a, target);
}

static bool trans_CALL(DisasContext *dc, arg_CALL *a)
{
    target_long target = address_mask_i(dc, dc->pc + a->i * 4);

    gen_store_gpr(dc, 15, tcg_constant_tl(dc->pc));
    dc->npc = target;
    return true;
}

static bool trans_NCP(DisasContext *dc, arg_NCP *a)
{
    /*
     * For sparc32, always generate the no-coprocessor exception.
     * For sparc64, always generate illegal instruction.
     */
#ifdef TARGET_SPARC64
    return false;
#else
    gen_exception(dc, TT_NCP_INSN);
    return true;
#endif
}

static bool trans_SETHI(DisasContext *dc, arg_SETHI *a)
{
    /* Special-case %g0 because that's the canonical nop. */
    if (a->rd) {
        gen_store_gpr(dc, a->rd, tcg_constant_tl((uint32_t)a->i << 10));
    }
    return advance_pc(dc);
}
/*
 * Major Opcode 10 -- integer, floating-point, vis, and system insns.
 */

static bool do_tcc(DisasContext *dc, int cond, int cc,
                   int rs1, bool imm, int rs2_or_imm)
{
    int mask = ((dc->def->features & CPU_FEATURE_HYPV) && supervisor(dc)
                ? UA2005_HTRAP_MASK : V8_TRAP_MASK);
    DisasCompare cmp;
    TCGLabel *lab;
    TCGv_i32 trap;

    /* Trap never. */
    if (cond == 0) {
        return advance_pc(dc);
    }

    /*
     * Immediate traps are the most common case.  Since this value is
     * live across the branch, it really pays to evaluate the constant.
     * (A worked example follows this function.)
     */
    if (rs1 == 0 && (imm || rs2_or_imm == 0)) {
        trap = tcg_constant_i32((rs2_or_imm & mask) + TT_TRAP);
    } else {
        trap = tcg_temp_new_i32();
        tcg_gen_trunc_tl_i32(trap, gen_load_gpr(dc, rs1));
        if (imm) {
            tcg_gen_addi_i32(trap, trap, rs2_or_imm);
        } else {
            TCGv_i32 t2 = tcg_temp_new_i32();
            tcg_gen_trunc_tl_i32(t2, gen_load_gpr(dc, rs2_or_imm));
            tcg_gen_add_i32(trap, trap, t2);
        }
        tcg_gen_andi_i32(trap, trap, mask);
        tcg_gen_addi_i32(trap, trap, TT_TRAP);
    }

    /* Trap always. */
    if (cond == 8) {
        gen_helper_raise_exception(tcg_env, trap);
        dc->base.is_jmp = DISAS_NORETURN;
        return true;
    }

    /* Conditional trap. */
    lab = delay_exceptionv(dc, trap);
    gen_compare(&cmp, cc, cond, dc);
    tcg_gen_brcond_tl(cmp.cond, cmp.c1, cmp.c2, lab);

    return advance_pc(dc);
}
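
/*
 * Illustrative example for do_tcc() above (not from the original source):
 * for the common immediate form with %g0 as rs1, e.g. "ta 0x10", the trap
 * number folds to tcg_constant_i32((0x10 & mask) + TT_TRAP) at translate
 * time, so the unconditional path emits a single raise_exception call with
 * a constant argument instead of computing the trap number at run time.
 */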
static bool trans_Tcc_r(DisasContext *dc, arg_Tcc_r *a)
{
    if (avail_32(dc) && a->cc) {
        return false;
    }
    return do_tcc(dc, a->cond, a->cc, a->rs1, false, a->rs2);
}

static bool trans_Tcc_i_v7(DisasContext *dc, arg_Tcc_i_v7 *a)
{
    return do_tcc(dc, a->cond, 0, a->rs1, true, a->i);
}

static bool trans_Tcc_i_v9(DisasContext *dc, arg_Tcc_i_v9 *a)
{
    return do_tcc(dc, a->cond, a->cc, a->rs1, true, a->i);
}

static bool trans_STBAR(DisasContext *dc, arg_STBAR *a)
{
    tcg_gen_mb(TCG_MO_ST_ST | TCG_BAR_SC);
    return advance_pc(dc);
}

static bool trans_MEMBAR(DisasContext *dc, arg_MEMBAR *a)
{
    /* Note TCG_MO_* was modeled on sparc64, so mmask matches. */
    tcg_gen_mb(a->mmask | TCG_BAR_SC);

    /* For #Sync, etc, end the TB to recognize interrupts. */
    dc->base.is_jmp = DISAS_EXIT;

    return advance_pc(dc);
}
static bool do_rd_special(DisasContext *dc, bool priv, int rd,
                          TCGv (*func)(DisasContext *, TCGv))
{
    if (!priv) {
        return raise_priv(dc);
    }
    gen_store_gpr(dc, rd, func(dc, gen_dest_gpr(dc, rd)));
    return advance_pc(dc);
}

static TCGv do_rdy(DisasContext *dc, TCGv dst)
{
    return cpu_y;
}

static bool trans_RDY(DisasContext *dc, arg_RDY *a)
{
    /*
     * TODO: Need a feature bit for sparcv8.  In the meantime, treat all
     * 32-bit cpus like sparcv7, which ignores the rs1 field.
     * This matches after all other ASR, so Leon3 Asr17 is handled first.
     */
    if (avail_64(dc) && a->rs1 != 0) {
        return false;
    }
    return do_rd_special(dc, true, a->rd, do_rdy);
}

static TCGv do_rd_leon3_config(DisasContext *dc, TCGv dst)
{
    uint32_t val;

    /*
     * TODO: There are many more fields to be filled,
     * some of which are writable.
     */
    val = dc->def->nwindows - 1;   /* [4:0] NWIN */
    val |= 1 << 8;                 /* [8] V8 */

    return tcg_constant_tl(val);
}

TRANS(RDASR17, ASR17, do_rd_special, true, a->rd, do_rd_leon3_config)
static TCGv do_rdccr(DisasContext *dc, TCGv dst)
{
    gen_helper_rdccr(dst, tcg_env);
    return dst;
}

TRANS(RDCCR, 64, do_rd_special, true, a->rd, do_rdccr)

static TCGv do_rdasi(DisasContext *dc, TCGv dst)
{
#ifdef TARGET_SPARC64
    return tcg_constant_tl(dc->asi);
#else
    qemu_build_not_reached();
#endif
}

TRANS(RDASI, 64, do_rd_special, true, a->rd, do_rdasi)

static TCGv do_rdtick(DisasContext *dc, TCGv dst)
{
    TCGv_ptr r_tickptr = tcg_temp_new_ptr();

    tcg_gen_ld_ptr(r_tickptr, tcg_env, env64_field_offsetof(tick));
    if (translator_io_start(&dc->base)) {
        dc->base.is_jmp = DISAS_EXIT;
    }
    gen_helper_tick_get_count(dst, tcg_env, r_tickptr,
                              tcg_constant_i32(dc->mem_idx));
    return dst;
}

/* TODO: non-priv access only allowed when enabled. */
TRANS(RDTICK, 64, do_rd_special, true, a->rd, do_rdtick)

static TCGv do_rdpc(DisasContext *dc, TCGv dst)
{
    return tcg_constant_tl(address_mask_i(dc, dc->pc));
}

TRANS(RDPC, 64, do_rd_special, true, a->rd, do_rdpc)

static TCGv do_rdfprs(DisasContext *dc, TCGv dst)
{
    tcg_gen_ext_i32_tl(dst, cpu_fprs);
    return dst;
}

TRANS(RDFPRS, 64, do_rd_special, true, a->rd, do_rdfprs)

static TCGv do_rdgsr(DisasContext *dc, TCGv dst)
{
    gen_trap_ifnofpu(dc);
    return cpu_gsr;
}

TRANS(RDGSR, 64, do_rd_special, true, a->rd, do_rdgsr)
static TCGv do_rdsoftint(DisasContext *dc, TCGv dst)
{
    tcg_gen_ld32s_tl(dst, tcg_env, env64_field_offsetof(softint));
    return dst;
}

TRANS(RDSOFTINT, 64, do_rd_special, supervisor(dc), a->rd, do_rdsoftint)

static TCGv do_rdtick_cmpr(DisasContext *dc, TCGv dst)
{
    tcg_gen_ld_tl(dst, tcg_env, env64_field_offsetof(tick_cmpr));
    return dst;
}

/* TODO: non-priv access only allowed when enabled. */
TRANS(RDTICK_CMPR, 64, do_rd_special, true, a->rd, do_rdtick_cmpr)

static TCGv do_rdstick(DisasContext *dc, TCGv dst)
{
    TCGv_ptr r_tickptr = tcg_temp_new_ptr();

    tcg_gen_ld_ptr(r_tickptr, tcg_env, env64_field_offsetof(stick));
    if (translator_io_start(&dc->base)) {
        dc->base.is_jmp = DISAS_EXIT;
    }
    gen_helper_tick_get_count(dst, tcg_env, r_tickptr,
                              tcg_constant_i32(dc->mem_idx));
    return dst;
}

/* TODO: non-priv access only allowed when enabled. */
TRANS(RDSTICK, 64, do_rd_special, true, a->rd, do_rdstick)

static TCGv do_rdstick_cmpr(DisasContext *dc, TCGv dst)
{
    tcg_gen_ld_tl(dst, tcg_env, env64_field_offsetof(stick_cmpr));
    return dst;
}

/* TODO: supervisor access only allowed when enabled by hypervisor. */
TRANS(RDSTICK_CMPR, 64, do_rd_special, supervisor(dc), a->rd, do_rdstick_cmpr)

/*
 * UltraSPARC-T1 Strand status.
 * HYPV check maybe not enough, UA2005 & UA2007 describe
 * this ASR as impl. dep
 */
static TCGv do_rdstrand_status(DisasContext *dc, TCGv dst)
{
    return tcg_constant_tl(1);
}

TRANS(RDSTRAND_STATUS, HYPV, do_rd_special, true, a->rd, do_rdstrand_status)

static TCGv do_rdpsr(DisasContext *dc, TCGv dst)
{
    gen_helper_rdpsr(dst, tcg_env);
    return dst;
}

TRANS(RDPSR, 32, do_rd_special, supervisor(dc), a->rd, do_rdpsr)
static TCGv do_rdhpstate(DisasContext *dc, TCGv dst)
{
    tcg_gen_ld_tl(dst, tcg_env, env64_field_offsetof(hpstate));
    return dst;
}

TRANS(RDHPR_hpstate, HYPV, do_rd_special, hypervisor(dc), a->rd, do_rdhpstate)

static TCGv do_rdhtstate(DisasContext *dc, TCGv dst)
{
    TCGv_i32 tl = tcg_temp_new_i32();
    TCGv_ptr tp = tcg_temp_new_ptr();

    tcg_gen_ld_i32(tl, tcg_env, env64_field_offsetof(tl));
    tcg_gen_andi_i32(tl, tl, MAXTL_MASK);
    tcg_gen_shli_i32(tl, tl, 3);
    tcg_gen_ext_i32_ptr(tp, tl);
    tcg_gen_add_ptr(tp, tp, tcg_env);

    tcg_gen_ld_tl(dst, tp, env64_field_offsetof(htstate));
    return dst;
}

TRANS(RDHPR_htstate, HYPV, do_rd_special, hypervisor(dc), a->rd, do_rdhtstate)

static TCGv do_rdhintp(DisasContext *dc, TCGv dst)
{
    tcg_gen_ld_tl(dst, tcg_env, env64_field_offsetof(hintp));
    return dst;
}

TRANS(RDHPR_hintp, HYPV, do_rd_special, hypervisor(dc), a->rd, do_rdhintp)

static TCGv do_rdhtba(DisasContext *dc, TCGv dst)
{
    tcg_gen_ld_tl(dst, tcg_env, env64_field_offsetof(htba));
    return dst;
}

TRANS(RDHPR_htba, HYPV, do_rd_special, hypervisor(dc), a->rd, do_rdhtba)

static TCGv do_rdhver(DisasContext *dc, TCGv dst)
{
    tcg_gen_ld_tl(dst, tcg_env, env64_field_offsetof(hver));
    return dst;
}

TRANS(RDHPR_hver, HYPV, do_rd_special, hypervisor(dc), a->rd, do_rdhver)

static TCGv do_rdhstick_cmpr(DisasContext *dc, TCGv dst)
{
    tcg_gen_ld_tl(dst, tcg_env, env64_field_offsetof(hstick_cmpr));
    return dst;
}

TRANS(RDHPR_hstick_cmpr, HYPV, do_rd_special, hypervisor(dc), a->rd,
      do_rdhstick_cmpr)
static TCGv do_rdwim(DisasContext *dc, TCGv dst)
{
    tcg_gen_ld_tl(dst, tcg_env, env32_field_offsetof(wim));
    return dst;
}

TRANS(RDWIM, 32, do_rd_special, supervisor(dc), a->rd, do_rdwim)

static TCGv do_rdtpc(DisasContext *dc, TCGv dst)
{
#ifdef TARGET_SPARC64
    TCGv_ptr r_tsptr = tcg_temp_new_ptr();

    gen_load_trap_state_at_tl(r_tsptr);
    tcg_gen_ld_tl(dst, r_tsptr, offsetof(trap_state, tpc));
    return dst;
#else
    qemu_build_not_reached();
#endif
}

TRANS(RDPR_tpc, 64, do_rd_special, supervisor(dc), a->rd, do_rdtpc)

static TCGv do_rdtnpc(DisasContext *dc, TCGv dst)
{
#ifdef TARGET_SPARC64
    TCGv_ptr r_tsptr = tcg_temp_new_ptr();

    gen_load_trap_state_at_tl(r_tsptr);
    tcg_gen_ld_tl(dst, r_tsptr, offsetof(trap_state, tnpc));
    return dst;
#else
    qemu_build_not_reached();
#endif
}

TRANS(RDPR_tnpc, 64, do_rd_special, supervisor(dc), a->rd, do_rdtnpc)

static TCGv do_rdtstate(DisasContext *dc, TCGv dst)
{
#ifdef TARGET_SPARC64
    TCGv_ptr r_tsptr = tcg_temp_new_ptr();

    gen_load_trap_state_at_tl(r_tsptr);
    tcg_gen_ld_tl(dst, r_tsptr, offsetof(trap_state, tstate));
    return dst;
#else
    qemu_build_not_reached();
#endif
}

TRANS(RDPR_tstate, 64, do_rd_special, supervisor(dc), a->rd, do_rdtstate)

static TCGv do_rdtt(DisasContext *dc, TCGv dst)
{
#ifdef TARGET_SPARC64
    TCGv_ptr r_tsptr = tcg_temp_new_ptr();

    gen_load_trap_state_at_tl(r_tsptr);
    tcg_gen_ld32s_tl(dst, r_tsptr, offsetof(trap_state, tt));
    return dst;
#else
    qemu_build_not_reached();
#endif
}

TRANS(RDPR_tt, 64, do_rd_special, supervisor(dc), a->rd, do_rdtt)
TRANS(RDPR_tick, 64, do_rd_special, supervisor(dc), a->rd, do_rdtick)

static TCGv do_rdtba(DisasContext *dc, TCGv dst)
{
    return cpu_tbr;
}

TRANS(RDTBR, 32, do_rd_special, supervisor(dc), a->rd, do_rdtba)
TRANS(RDPR_tba, 64, do_rd_special, supervisor(dc), a->rd, do_rdtba)

static TCGv do_rdpstate(DisasContext *dc, TCGv dst)
{
    tcg_gen_ld32s_tl(dst, tcg_env, env64_field_offsetof(pstate));
    return dst;
}

TRANS(RDPR_pstate, 64, do_rd_special, supervisor(dc), a->rd, do_rdpstate)

static TCGv do_rdtl(DisasContext *dc, TCGv dst)
{
    tcg_gen_ld32s_tl(dst, tcg_env, env64_field_offsetof(tl));
    return dst;
}

TRANS(RDPR_tl, 64, do_rd_special, supervisor(dc), a->rd, do_rdtl)
static TCGv do_rdpil(DisasContext *dc, TCGv dst)
{
    tcg_gen_ld32s_tl(dst, tcg_env, env_field_offsetof(psrpil));
    return dst;
}

TRANS(RDPR_pil, 64, do_rd_special, supervisor(dc), a->rd, do_rdpil)

static TCGv do_rdcwp(DisasContext *dc, TCGv dst)
{
    gen_helper_rdcwp(dst, tcg_env);
    return dst;
}

TRANS(RDPR_cwp, 64, do_rd_special, supervisor(dc), a->rd, do_rdcwp)

static TCGv do_rdcansave(DisasContext *dc, TCGv dst)
{
    tcg_gen_ld32s_tl(dst, tcg_env, env64_field_offsetof(cansave));
    return dst;
}

TRANS(RDPR_cansave, 64, do_rd_special, supervisor(dc), a->rd, do_rdcansave)

static TCGv do_rdcanrestore(DisasContext *dc, TCGv dst)
{
    tcg_gen_ld32s_tl(dst, tcg_env, env64_field_offsetof(canrestore));
    return dst;
}

TRANS(RDPR_canrestore, 64, do_rd_special, supervisor(dc), a->rd,
      do_rdcanrestore)

static TCGv do_rdcleanwin(DisasContext *dc, TCGv dst)
{
    tcg_gen_ld32s_tl(dst, tcg_env, env64_field_offsetof(cleanwin));
    return dst;
}

TRANS(RDPR_cleanwin, 64, do_rd_special, supervisor(dc), a->rd, do_rdcleanwin)

static TCGv do_rdotherwin(DisasContext *dc, TCGv dst)
{
    tcg_gen_ld32s_tl(dst, tcg_env, env64_field_offsetof(otherwin));
    return dst;
}

TRANS(RDPR_otherwin, 64, do_rd_special, supervisor(dc), a->rd, do_rdotherwin)

static TCGv do_rdwstate(DisasContext *dc, TCGv dst)
{
    tcg_gen_ld32s_tl(dst, tcg_env, env64_field_offsetof(wstate));
    return dst;
}

TRANS(RDPR_wstate, 64, do_rd_special, supervisor(dc), a->rd, do_rdwstate)

static TCGv do_rdgl(DisasContext *dc, TCGv dst)
{
    tcg_gen_ld32s_tl(dst, tcg_env, env64_field_offsetof(gl));
    return dst;
}

TRANS(RDPR_gl, GL, do_rd_special, supervisor(dc), a->rd, do_rdgl)

/* UA2005 strand status */
static TCGv do_rdssr(DisasContext *dc, TCGv dst)
{
    tcg_gen_ld_tl(dst, tcg_env, env64_field_offsetof(ssr));
    return dst;
}

TRANS(RDPR_strand_status, HYPV, do_rd_special, hypervisor(dc), a->rd, do_rdssr)

static TCGv do_rdver(DisasContext *dc, TCGv dst)
{
    tcg_gen_ld_tl(dst, tcg_env, env64_field_offsetof(version));
    return dst;
}

TRANS(RDPR_ver, 64, do_rd_special, supervisor(dc), a->rd, do_rdver)
static bool trans_FLUSHW(DisasContext *dc, arg_FLUSHW *a)
{
    gen_helper_flushw(tcg_env);
    return advance_pc(dc);
}

static bool do_wr_special(DisasContext *dc, arg_r_r_ri *a, bool priv,
                          void (*func)(DisasContext *, TCGv))
{
    TCGv src;

    /* For simplicity, we under-decoded the rs2 form. */
    if (!a->imm && (a->rs2_or_imm & ~0x1f)) {
        return false;
    }
    if (!priv) {
        return raise_priv(dc);
    }

    if (a->rs1 == 0 && (a->imm || a->rs2_or_imm == 0)) {
        src = tcg_constant_tl(a->rs2_or_imm);
    } else {
        TCGv src1 = gen_load_gpr(dc, a->rs1);
        if (a->rs2_or_imm == 0) {
            src = src1;
        } else {
            src = tcg_temp_new();
            if (a->imm) {
                tcg_gen_xori_tl(src, src1, a->rs2_or_imm);
            } else {
                tcg_gen_xor_tl(src, src1, gen_load_gpr(dc, a->rs2_or_imm));
            }
        }
    }
    func(dc, src);
    return advance_pc(dc);
}
static void do_wry(DisasContext *dc, TCGv src)
{
    tcg_gen_ext32u_tl(cpu_y, src);
}

TRANS(WRY, ALL, do_wr_special, a, true, do_wry)

static void do_wrccr(DisasContext *dc, TCGv src)
{
    gen_helper_wrccr(tcg_env, src);
}

TRANS(WRCCR, 64, do_wr_special, a, true, do_wrccr)

static void do_wrasi(DisasContext *dc, TCGv src)
{
    TCGv tmp = tcg_temp_new();

    tcg_gen_ext8u_tl(tmp, src);
    tcg_gen_st32_tl(tmp, tcg_env, env64_field_offsetof(asi));
    /* End TB to notice changed ASI. */
    dc->base.is_jmp = DISAS_EXIT;
}

TRANS(WRASI, 64, do_wr_special, a, true, do_wrasi)

static void do_wrfprs(DisasContext *dc, TCGv src)
{
#ifdef TARGET_SPARC64
    tcg_gen_trunc_tl_i32(cpu_fprs, src);
    dc->base.is_jmp = DISAS_EXIT;
#else
    qemu_build_not_reached();
#endif
}

TRANS(WRFPRS, 64, do_wr_special, a, true, do_wrfprs)

static void do_wrgsr(DisasContext *dc, TCGv src)
{
    gen_trap_ifnofpu(dc);
    tcg_gen_mov_tl(cpu_gsr, src);
}

TRANS(WRGSR, 64, do_wr_special, a, true, do_wrgsr)

static void do_wrsoftint_set(DisasContext *dc, TCGv src)
{
    gen_helper_set_softint(tcg_env, src);
}

TRANS(WRSOFTINT_SET, 64, do_wr_special, a, supervisor(dc), do_wrsoftint_set)

static void do_wrsoftint_clr(DisasContext *dc, TCGv src)
{
    gen_helper_clear_softint(tcg_env, src);
}

TRANS(WRSOFTINT_CLR, 64, do_wr_special, a, supervisor(dc), do_wrsoftint_clr)

static void do_wrsoftint(DisasContext *dc, TCGv src)
{
    gen_helper_write_softint(tcg_env, src);
}

TRANS(WRSOFTINT, 64, do_wr_special, a, supervisor(dc), do_wrsoftint)

static void do_wrtick_cmpr(DisasContext *dc, TCGv src)
{
    TCGv_ptr r_tickptr = tcg_temp_new_ptr();

    tcg_gen_st_tl(src, tcg_env, env64_field_offsetof(tick_cmpr));
    tcg_gen_ld_ptr(r_tickptr, tcg_env, env64_field_offsetof(tick));
    translator_io_start(&dc->base);
    gen_helper_tick_set_limit(r_tickptr, src);
    /* End TB to handle timer interrupt */
    dc->base.is_jmp = DISAS_EXIT;
}

TRANS(WRTICK_CMPR, 64, do_wr_special, a, supervisor(dc), do_wrtick_cmpr)
static void do_wrstick(DisasContext *dc, TCGv src)
{
#ifdef TARGET_SPARC64
    TCGv_ptr r_tickptr = tcg_temp_new_ptr();

    tcg_gen_ld_ptr(r_tickptr, tcg_env, offsetof(CPUSPARCState, stick));
    translator_io_start(&dc->base);
    gen_helper_tick_set_count(r_tickptr, src);
    /* End TB to handle timer interrupt */
    dc->base.is_jmp = DISAS_EXIT;
#else
    qemu_build_not_reached();
#endif
}

TRANS(WRSTICK, 64, do_wr_special, a, supervisor(dc), do_wrstick)

static void do_wrstick_cmpr(DisasContext *dc, TCGv src)
{
    TCGv_ptr r_tickptr = tcg_temp_new_ptr();

    tcg_gen_st_tl(src, tcg_env, env64_field_offsetof(stick_cmpr));
    tcg_gen_ld_ptr(r_tickptr, tcg_env, env64_field_offsetof(stick));
    translator_io_start(&dc->base);
    gen_helper_tick_set_limit(r_tickptr, src);
    /* End TB to handle timer interrupt */
    dc->base.is_jmp = DISAS_EXIT;
}

TRANS(WRSTICK_CMPR, 64, do_wr_special, a, supervisor(dc), do_wrstick_cmpr)

static void do_wrpowerdown(DisasContext *dc, TCGv src)
{
    gen_helper_power_down(tcg_env);
}

TRANS(WRPOWERDOWN, POWERDOWN, do_wr_special, a, supervisor(dc), do_wrpowerdown)

static void do_wrpsr(DisasContext *dc, TCGv src)
{
    gen_helper_wrpsr(tcg_env, src);
    tcg_gen_movi_i32(cpu_cc_op, CC_OP_FLAGS);
    dc->cc_op = CC_OP_FLAGS;
    dc->base.is_jmp = DISAS_EXIT;
}

TRANS(WRPSR, 32, do_wr_special, a, supervisor(dc), do_wrpsr)
static void do_wrwim(DisasContext *dc, TCGv src)
{
    target_ulong mask = MAKE_64BIT_MASK(0, dc->def->nwindows);
    TCGv tmp = tcg_temp_new();

    tcg_gen_andi_tl(tmp, src, mask);
    tcg_gen_st_tl(tmp, tcg_env, env32_field_offsetof(wim));
}

TRANS(WRWIM, 32, do_wr_special, a, supervisor(dc), do_wrwim)

static void do_wrtpc(DisasContext *dc, TCGv src)
{
#ifdef TARGET_SPARC64
    TCGv_ptr r_tsptr = tcg_temp_new_ptr();

    gen_load_trap_state_at_tl(r_tsptr);
    tcg_gen_st_tl(src, r_tsptr, offsetof(trap_state, tpc));
#else
    qemu_build_not_reached();
#endif
}

TRANS(WRPR_tpc, 64, do_wr_special, a, supervisor(dc), do_wrtpc)

static void do_wrtnpc(DisasContext *dc, TCGv src)
{
#ifdef TARGET_SPARC64
    TCGv_ptr r_tsptr = tcg_temp_new_ptr();

    gen_load_trap_state_at_tl(r_tsptr);
    tcg_gen_st_tl(src, r_tsptr, offsetof(trap_state, tnpc));
#else
    qemu_build_not_reached();
#endif
}

TRANS(WRPR_tnpc, 64, do_wr_special, a, supervisor(dc), do_wrtnpc)

static void do_wrtstate(DisasContext *dc, TCGv src)
{
#ifdef TARGET_SPARC64
    TCGv_ptr r_tsptr = tcg_temp_new_ptr();

    gen_load_trap_state_at_tl(r_tsptr);
    tcg_gen_st_tl(src, r_tsptr, offsetof(trap_state, tstate));
#else
    qemu_build_not_reached();
#endif
}

TRANS(WRPR_tstate, 64, do_wr_special, a, supervisor(dc), do_wrtstate)

static void do_wrtt(DisasContext *dc, TCGv src)
{
#ifdef TARGET_SPARC64
    TCGv_ptr r_tsptr = tcg_temp_new_ptr();

    gen_load_trap_state_at_tl(r_tsptr);
    tcg_gen_st32_tl(src, r_tsptr, offsetof(trap_state, tt));
#else
    qemu_build_not_reached();
#endif
}

TRANS(WRPR_tt, 64, do_wr_special, a, supervisor(dc), do_wrtt)
static void do_wrtick(DisasContext *dc, TCGv src)
{
    TCGv_ptr r_tickptr = tcg_temp_new_ptr();

    tcg_gen_ld_ptr(r_tickptr, tcg_env, env64_field_offsetof(tick));
    translator_io_start(&dc->base);
    gen_helper_tick_set_count(r_tickptr, src);
    /* End TB to handle timer interrupt */
    dc->base.is_jmp = DISAS_EXIT;
}

TRANS(WRPR_tick, 64, do_wr_special, a, supervisor(dc), do_wrtick)

static void do_wrtba(DisasContext *dc, TCGv src)
{
    tcg_gen_mov_tl(cpu_tbr, src);
}

TRANS(WRPR_tba, 64, do_wr_special, a, supervisor(dc), do_wrtba)

static void do_wrpstate(DisasContext *dc, TCGv src)
{
    if (translator_io_start(&dc->base)) {
        dc->base.is_jmp = DISAS_EXIT;
    }
    gen_helper_wrpstate(tcg_env, src);
    dc->npc = DYNAMIC_PC;
}

TRANS(WRPR_pstate, 64, do_wr_special, a, supervisor(dc), do_wrpstate)

static void do_wrtl(DisasContext *dc, TCGv src)
{
    tcg_gen_st32_tl(src, tcg_env, env64_field_offsetof(tl));
    dc->npc = DYNAMIC_PC;
}

TRANS(WRPR_tl, 64, do_wr_special, a, supervisor(dc), do_wrtl)

static void do_wrpil(DisasContext *dc, TCGv src)
{
    if (translator_io_start(&dc->base)) {
        dc->base.is_jmp = DISAS_EXIT;
    }
    gen_helper_wrpil(tcg_env, src);
}

TRANS(WRPR_pil, 64, do_wr_special, a, supervisor(dc), do_wrpil)

static void do_wrcwp(DisasContext *dc, TCGv src)
{
    gen_helper_wrcwp(tcg_env, src);
}

TRANS(WRPR_cwp, 64, do_wr_special, a, supervisor(dc), do_wrcwp)
static void do_wrcansave(DisasContext *dc, TCGv src)
{
    tcg_gen_st32_tl(src, tcg_env, env64_field_offsetof(cansave));
}

TRANS(WRPR_cansave, 64, do_wr_special, a, supervisor(dc), do_wrcansave)

static void do_wrcanrestore(DisasContext *dc, TCGv src)
{
    tcg_gen_st32_tl(src, tcg_env, env64_field_offsetof(canrestore));
}

TRANS(WRPR_canrestore, 64, do_wr_special, a, supervisor(dc), do_wrcanrestore)

static void do_wrcleanwin(DisasContext *dc, TCGv src)
{
    tcg_gen_st32_tl(src, tcg_env, env64_field_offsetof(cleanwin));
}

TRANS(WRPR_cleanwin, 64, do_wr_special, a, supervisor(dc), do_wrcleanwin)

static void do_wrotherwin(DisasContext *dc, TCGv src)
{
    tcg_gen_st32_tl(src, tcg_env, env64_field_offsetof(otherwin));
}

TRANS(WRPR_otherwin, 64, do_wr_special, a, supervisor(dc), do_wrotherwin)

static void do_wrwstate(DisasContext *dc, TCGv src)
{
    tcg_gen_st32_tl(src, tcg_env, env64_field_offsetof(wstate));
}

TRANS(WRPR_wstate, 64, do_wr_special, a, supervisor(dc), do_wrwstate)

static void do_wrgl(DisasContext *dc, TCGv src)
{
    gen_helper_wrgl(tcg_env, src);
}

TRANS(WRPR_gl, GL, do_wr_special, a, supervisor(dc), do_wrgl)
/* UA2005 strand status */
static void do_wrssr(DisasContext *dc, TCGv src)
{
    tcg_gen_st_tl(src, tcg_env, env64_field_offsetof(ssr));
}

TRANS(WRPR_strand_status, HYPV, do_wr_special, a, hypervisor(dc), do_wrssr)

TRANS(WRTBR, 32, do_wr_special, a, supervisor(dc), do_wrtba)

static void do_wrhpstate(DisasContext *dc, TCGv src)
{
    tcg_gen_st_tl(src, tcg_env, env64_field_offsetof(hpstate));
    dc->base.is_jmp = DISAS_EXIT;
}

TRANS(WRHPR_hpstate, HYPV, do_wr_special, a, hypervisor(dc), do_wrhpstate)

static void do_wrhtstate(DisasContext *dc, TCGv src)
{
    TCGv_i32 tl = tcg_temp_new_i32();
    TCGv_ptr tp = tcg_temp_new_ptr();

    tcg_gen_ld_i32(tl, tcg_env, env64_field_offsetof(tl));
    tcg_gen_andi_i32(tl, tl, MAXTL_MASK);
    tcg_gen_shli_i32(tl, tl, 3);
    tcg_gen_ext_i32_ptr(tp, tl);
    tcg_gen_add_ptr(tp, tp, tcg_env);

    tcg_gen_st_tl(src, tp, env64_field_offsetof(htstate));
}

TRANS(WRHPR_htstate, HYPV, do_wr_special, a, hypervisor(dc), do_wrhtstate)

static void do_wrhintp(DisasContext *dc, TCGv src)
{
    tcg_gen_st_tl(src, tcg_env, env64_field_offsetof(hintp));
}

TRANS(WRHPR_hintp, HYPV, do_wr_special, a, hypervisor(dc), do_wrhintp)

static void do_wrhtba(DisasContext *dc, TCGv src)
{
    tcg_gen_st_tl(src, tcg_env, env64_field_offsetof(htba));
}

TRANS(WRHPR_htba, HYPV, do_wr_special, a, hypervisor(dc), do_wrhtba)

static void do_wrhstick_cmpr(DisasContext *dc, TCGv src)
{
    TCGv_ptr r_tickptr = tcg_temp_new_ptr();

    tcg_gen_st_tl(src, tcg_env, env64_field_offsetof(hstick_cmpr));
    tcg_gen_ld_ptr(r_tickptr, tcg_env, env64_field_offsetof(hstick));
    translator_io_start(&dc->base);
    gen_helper_tick_set_limit(r_tickptr, src);
    /* End TB to handle timer interrupt */
    dc->base.is_jmp = DISAS_EXIT;
}

TRANS(WRHPR_hstick_cmpr, HYPV, do_wr_special, a, hypervisor(dc),
      do_wrhstick_cmpr)
static bool do_saved_restored(DisasContext *dc, bool saved)
{
    if (!supervisor(dc)) {
        return raise_priv(dc);
    }
    if (saved) {
        gen_helper_saved(tcg_env);
    } else {
        gen_helper_restored(tcg_env);
    }
    return advance_pc(dc);
}

TRANS(SAVED, 64, do_saved_restored, true)
TRANS(RESTORED, 64, do_saved_restored, false)

static bool trans_NOP(DisasContext *dc, arg_NOP *a)
{
    return advance_pc(dc);
}

static bool trans_NOP_v7(DisasContext *dc, arg_NOP_v7 *a)
{
    /*
     * TODO: Need a feature bit for sparcv8.
     * In the meantime, treat all 32-bit cpus like sparcv7.
     */
    return advance_pc(dc);
}
static bool do_arith_int(DisasContext *dc, arg_r_r_ri_cc *a, int cc_op,
                         void (*func)(TCGv, TCGv, TCGv),
                         void (*funci)(TCGv, TCGv, target_long))
{
    TCGv dst, src1;

    /* For simplicity, we under-decoded the rs2 form. */
    if (!a->imm && a->rs2_or_imm & ~0x1f) {
        return false;
    }

    dst = gen_dest_gpr(dc, a->rd);
    src1 = gen_load_gpr(dc, a->rs1);

    if (a->imm || a->rs2_or_imm == 0) {
        if (funci) {
            funci(dst, src1, a->rs2_or_imm);
        } else {
            func(dst, src1, tcg_constant_tl(a->rs2_or_imm));
        }
    } else {
        func(dst, src1, cpu_regs[a->rs2_or_imm]);
    }
    gen_store_gpr(dc, a->rd, dst);

    if (a->cc) {
        tcg_gen_movi_i32(cpu_cc_op, cc_op);
    }
    return advance_pc(dc);
}
static bool do_arith(DisasContext *dc, arg_r_r_ri_cc *a, int cc_op,
                     void (*func)(TCGv, TCGv, TCGv),
                     void (*funci)(TCGv, TCGv, target_long),
                     void (*func_cc)(TCGv, TCGv, TCGv))
{
    if (a->cc) {
        return do_arith_int(dc, a, cc_op, func_cc, NULL);
    }
    return do_arith_int(dc, a, cc_op, func, funci);
}

static bool do_logic(DisasContext *dc, arg_r_r_ri_cc *a,
                     void (*func)(TCGv, TCGv, TCGv),
                     void (*funci)(TCGv, TCGv, target_long))
{
    return do_arith_int(dc, a, CC_OP_LOGIC, func, funci);
}

TRANS(ADD, ALL, do_arith, a, CC_OP_ADD,
      tcg_gen_add_tl, tcg_gen_addi_tl, gen_op_add_cc)
TRANS(SUB, ALL, do_arith, a, CC_OP_SUB,
      tcg_gen_sub_tl, tcg_gen_subi_tl, gen_op_sub_cc)

TRANS(TADDcc, ALL, do_arith, a, CC_OP_TADD, NULL, NULL, gen_op_add_cc)
TRANS(TSUBcc, ALL, do_arith, a, CC_OP_TSUB, NULL, NULL, gen_op_sub_cc)
TRANS(TADDccTV, ALL, do_arith, a, CC_OP_TADDTV, NULL, NULL, gen_op_taddcctv)
TRANS(TSUBccTV, ALL, do_arith, a, CC_OP_TSUBTV, NULL, NULL, gen_op_tsubcctv)

TRANS(AND, ALL, do_logic, a, tcg_gen_and_tl, tcg_gen_andi_tl)
TRANS(XOR, ALL, do_logic, a, tcg_gen_xor_tl, tcg_gen_xori_tl)
TRANS(ANDN, ALL, do_logic, a, tcg_gen_andc_tl, NULL)
TRANS(ORN, ALL, do_logic, a, tcg_gen_orc_tl, NULL)
TRANS(XORN, ALL, do_logic, a, tcg_gen_eqv_tl, NULL)

TRANS(MULX, 64, do_arith, a, -1, tcg_gen_mul_tl, tcg_gen_muli_tl, NULL)
TRANS(UMUL, MUL, do_logic, a, gen_op_umul, NULL)
TRANS(SMUL, MUL, do_logic, a, gen_op_smul, NULL)

TRANS(UDIVX, 64, do_arith, a, -1, gen_op_udivx, NULL, NULL)
TRANS(SDIVX, 64, do_arith, a, -1, gen_op_sdivx, NULL, NULL)
TRANS(UDIV, DIV, do_arith, a, CC_OP_DIV, gen_op_udiv, NULL, gen_op_udivcc)
TRANS(SDIV, DIV, do_arith, a, CC_OP_DIV, gen_op_sdiv, NULL, gen_op_sdivcc)

/* TODO: Should have feature bit -- comes in with UltraSparc T2. */
TRANS(POPC, 64, do_arith, a, -1, gen_op_popc, NULL, NULL)
static bool trans_OR(DisasContext *dc, arg_r_r_ri_cc *a)
{
    /* OR with %g0 is the canonical alias for MOV. */
    if (!a->cc && a->rs1 == 0) {
        if (a->imm || a->rs2_or_imm == 0) {
            gen_store_gpr(dc, a->rd, tcg_constant_tl(a->rs2_or_imm));
        } else if (a->rs2_or_imm & ~0x1f) {
            /* For simplicity, we under-decoded the rs2 form. */
            return false;
        } else {
            gen_store_gpr(dc, a->rd, cpu_regs[a->rs2_or_imm]);
        }
        return advance_pc(dc);
    }
    return do_logic(dc, a, tcg_gen_or_tl, tcg_gen_ori_tl);
}

static bool trans_ADDC(DisasContext *dc, arg_r_r_ri_cc *a)
{
    switch (dc->cc_op) {
    case CC_OP_DIV:
    case CC_OP_LOGIC:
        /* Carry is known to be zero.  Fall back to plain ADD. */
        return do_arith(dc, a, CC_OP_ADD,
                        tcg_gen_add_tl, tcg_gen_addi_tl, gen_op_add_cc);
    case CC_OP_ADD:
    case CC_OP_TADD:
    case CC_OP_TADDTV:
        return do_arith(dc, a, CC_OP_ADDX,
                        gen_op_addc_add, NULL, gen_op_addccc_add);
    case CC_OP_SUB:
    case CC_OP_TSUB:
    case CC_OP_TSUBTV:
        return do_arith(dc, a, CC_OP_ADDX,
                        gen_op_addc_sub, NULL, gen_op_addccc_sub);
    default:
        return do_arith(dc, a, CC_OP_ADDX,
                        gen_op_addc_generic, NULL, gen_op_addccc_generic);
    }
}

static bool trans_SUBC(DisasContext *dc, arg_r_r_ri_cc *a)
{
    switch (dc->cc_op) {
    case CC_OP_DIV:
    case CC_OP_LOGIC:
        /* Carry is known to be zero.  Fall back to plain SUB. */
        return do_arith(dc, a, CC_OP_SUB,
                        tcg_gen_sub_tl, tcg_gen_subi_tl, gen_op_sub_cc);
    case CC_OP_ADD:
    case CC_OP_TADD:
    case CC_OP_TADDTV:
        return do_arith(dc, a, CC_OP_SUBX,
                        gen_op_subc_add, NULL, gen_op_subccc_add);
    case CC_OP_SUB:
    case CC_OP_TSUB:
    case CC_OP_TSUBTV:
        return do_arith(dc, a, CC_OP_SUBX,
                        gen_op_subc_sub, NULL, gen_op_subccc_sub);
    default:
        return do_arith(dc, a, CC_OP_SUBX,
                        gen_op_subc_generic, NULL, gen_op_subccc_generic);
    }
}

static bool trans_MULScc(DisasContext *dc, arg_r_r_ri_cc *a)
{
    return do_arith(dc, a, CC_OP_ADD, NULL, NULL, gen_op_mulscc);
}
static bool do_shift_r(DisasContext *dc, arg_shiftr *a, bool l, bool u)
{
    TCGv dst, src1, src2;

    /* Reject 64-bit shifts for sparc32. */
    if (avail_32(dc) && a->x) {
        return false;
    }

    src2 = tcg_temp_new();
    tcg_gen_andi_tl(src2, gen_load_gpr(dc, a->rs2), a->x ? 63 : 31);
    src1 = gen_load_gpr(dc, a->rs1);
    dst = gen_dest_gpr(dc, a->rd);

    if (l) {
        tcg_gen_shl_tl(dst, src1, src2);
        if (!a->x) {
            tcg_gen_ext32u_tl(dst, dst);
        }
    } else if (u) {
        if (!a->x) {
            tcg_gen_ext32u_tl(dst, src1);
            src1 = dst;
        }
        tcg_gen_shr_tl(dst, src1, src2);
    } else {
        if (!a->x) {
            tcg_gen_ext32s_tl(dst, src1);
            src1 = dst;
        }
        tcg_gen_sar_tl(dst, src1, src2);
    }
    gen_store_gpr(dc, a->rd, dst);
    return advance_pc(dc);
}

TRANS(SLL_r, ALL, do_shift_r, a, true, true)
TRANS(SRL_r, ALL, do_shift_r, a, false, true)
TRANS(SRA_r, ALL, do_shift_r, a, false, false)

static bool do_shift_i(DisasContext *dc, arg_shifti *a, bool l, bool u)
{
    TCGv dst, src1;

    /* Reject 64-bit shifts for sparc32. */
    if (avail_32(dc) && (a->x || a->i >= 32)) {
        return false;
    }

    src1 = gen_load_gpr(dc, a->rs1);
    dst = gen_dest_gpr(dc, a->rd);

    if (avail_32(dc) || a->x) {
        if (l) {
            tcg_gen_shli_tl(dst, src1, a->i);
        } else if (u) {
            tcg_gen_shri_tl(dst, src1, a->i);
        } else {
            tcg_gen_sari_tl(dst, src1, a->i);
        }
    } else {
        if (l) {
            tcg_gen_deposit_z_tl(dst, src1, a->i, 32 - a->i);
        } else if (u) {
            tcg_gen_extract_tl(dst, src1, a->i, 32 - a->i);
        } else {
            tcg_gen_sextract_tl(dst, src1, a->i, 32 - a->i);
        }
    }
    gen_store_gpr(dc, a->rd, dst);
    return advance_pc(dc);
}

TRANS(SLL_i, ALL, do_shift_i, a, true, true)
TRANS(SRL_i, ALL, do_shift_i, a, false, true)
TRANS(SRA_i, ALL, do_shift_i, a, false, false)
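
/*
 * Illustrative example for do_shift_i() above (not from the original
 * source): on sparc64 with the x bit clear, the 32-bit immediate forms map
 * onto TCG extract operations.  E.g. "srl %o0, 3, %o1" takes the u-path with
 * a->i = 3 and emits tcg_gen_extract_tl(dst, src1, 3, 29), i.e. bits [3,31]
 * of the low 32-bit value, zero-extended -- the same result the architected
 * 32-bit logical right shift by 3 requires.
 */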
static TCGv gen_rs2_or_imm(DisasContext *dc, bool imm, int rs2_or_imm)
{
    /* For simplicity, we under-decoded the rs2 form. */
    if (!imm && rs2_or_imm & ~0x1f) {
        return NULL;
    }
    if (imm || rs2_or_imm == 0) {
        return tcg_constant_tl(rs2_or_imm);
    } else {
        return cpu_regs[rs2_or_imm];
    }
}

static bool do_mov_cond(DisasContext *dc, DisasCompare *cmp, int rd, TCGv src2)
{
    TCGv dst = gen_load_gpr(dc, rd);

    tcg_gen_movcond_tl(cmp->cond, dst, cmp->c1, cmp->c2, src2, dst);
    gen_store_gpr(dc, rd, dst);
    return advance_pc(dc);
}

static bool trans_MOVcc(DisasContext *dc, arg_MOVcc *a)
{
    TCGv src2 = gen_rs2_or_imm(dc, a->imm, a->rs2_or_imm);
    DisasCompare cmp;

    if (src2 == NULL) {
        return false;
    }
    gen_compare(&cmp, a->cc, a->cond, dc);
    return do_mov_cond(dc, &cmp, a->rd, src2);
}

static bool trans_MOVfcc(DisasContext *dc, arg_MOVfcc *a)
{
    TCGv src2 = gen_rs2_or_imm(dc, a->imm, a->rs2_or_imm);
    DisasCompare cmp;

    if (src2 == NULL) {
        return false;
    }
    gen_fcompare(&cmp, a->cc, a->cond);
    return do_mov_cond(dc, &cmp, a->rd, src2);
}

static bool trans_MOVR(DisasContext *dc, arg_MOVR *a)
{
    TCGv src2 = gen_rs2_or_imm(dc, a->imm, a->rs2_or_imm);
    DisasCompare cmp;

    if (src2 == NULL) {
        return false;
    }
    gen_compare_reg(&cmp, a->cond, gen_load_gpr(dc, a->rs1));
    return do_mov_cond(dc, &cmp, a->rd, src2);
}
static bool do_add_special(DisasContext *dc, arg_r_r_ri *a,
                           bool (*func)(DisasContext *dc, int rd, TCGv src))
{
    TCGv src1, sum;

    /* For simplicity, we under-decoded the rs2 form. */
    if (!a->imm && a->rs2_or_imm & ~0x1f) {
        return false;
    }

    /*
     * Always load the sum into a new temporary.
     * This is required to capture the value across a window change,
     * e.g. SAVE and RESTORE, and may be optimized away otherwise.
     */
    sum = tcg_temp_new();
    src1 = gen_load_gpr(dc, a->rs1);
    if (a->imm || a->rs2_or_imm == 0) {
        tcg_gen_addi_tl(sum, src1, a->rs2_or_imm);
    } else {
        tcg_gen_add_tl(sum, src1, cpu_regs[a->rs2_or_imm]);
    }
    return func(dc, a->rd, sum);
}

static bool do_jmpl(DisasContext *dc, int rd, TCGv src)
{
    /*
     * Preserve pc across advance, so that we can delay
     * the writeback to rd until after src is consumed.
     */
    target_ulong cur_pc = dc->pc;

    gen_check_align(dc, src, 3);

    tcg_gen_mov_tl(cpu_npc, src);
    gen_address_mask(dc, cpu_npc);
    gen_store_gpr(dc, rd, tcg_constant_tl(cur_pc));

    dc->npc = DYNAMIC_PC_LOOKUP;
    return true;
}

TRANS(JMPL, ALL, do_add_special, a, do_jmpl)

static bool do_rett(DisasContext *dc, int rd, TCGv src)
{
    if (!supervisor(dc)) {
        return raise_priv(dc);
    }

    gen_check_align(dc, src, 3);

    tcg_gen_mov_tl(cpu_npc, src);
    gen_helper_rett(tcg_env);

    dc->npc = DYNAMIC_PC;
    return true;
}

TRANS(RETT, 32, do_add_special, a, do_rett)

static bool do_return(DisasContext *dc, int rd, TCGv src)
{
    gen_check_align(dc, src, 3);

    tcg_gen_mov_tl(cpu_npc, src);
    gen_address_mask(dc, cpu_npc);

    gen_helper_restore(tcg_env);
    dc->npc = DYNAMIC_PC_LOOKUP;
    return true;
}

TRANS(RETURN, 64, do_add_special, a, do_return)

static bool do_save(DisasContext *dc, int rd, TCGv src)
{
    gen_helper_save(tcg_env);
    gen_store_gpr(dc, rd, src);
    return advance_pc(dc);
}

TRANS(SAVE, ALL, do_add_special, a, do_save)

static bool do_restore(DisasContext *dc, int rd, TCGv src)
{
    gen_helper_restore(tcg_env);
    gen_store_gpr(dc, rd, src);
    return advance_pc(dc);
}

TRANS(RESTORE, ALL, do_add_special, a, do_restore)

static bool do_done_retry(DisasContext *dc, bool done)
{
    if (!supervisor(dc)) {
        return raise_priv(dc);
    }
    dc->npc = DYNAMIC_PC;
    dc->pc = DYNAMIC_PC;
    translator_io_start(&dc->base);
    if (done) {
        gen_helper_done(tcg_env);
    } else {
        gen_helper_retry(tcg_env);
    }
    return true;
}

TRANS(DONE, 64, do_done_retry, true)
TRANS(RETRY, 64, do_done_retry, false)
#define CHECK_IU_FEATURE(dc, FEATURE)                      \
    if (!((dc)->def->features & CPU_FEATURE_ ## FEATURE))  \
        goto illegal_insn;
#define CHECK_FPU_FEATURE(dc, FEATURE)                     \
    if (!((dc)->def->features & CPU_FEATURE_ ## FEATURE))  \
        goto nfpu_insn;

/* before an instruction, dc->pc must be static */
static void disas_sparc_legacy(DisasContext *dc, unsigned int insn)
{
    unsigned int opc, rs1, rs2, rd;
    TCGv cpu_src1 __attribute__((unused));
    TCGv cpu_src2 __attribute__((unused));
    TCGv_i32 cpu_src1_32, cpu_src2_32, cpu_dst_32;
    TCGv_i64 cpu_src1_64, cpu_src2_64, cpu_dst_64;

    opc = GET_FIELD(insn, 0, 1);
    rd = GET_FIELD(insn, 2, 6);

    switch (opc) {
    case 0:
        goto illegal_insn;      /* in decodetree */
    case 1:
        g_assert_not_reached(); /* in decodetree */
4529 case 2: /* FPU & Logical Operations */
4531 unsigned int xop
= GET_FIELD(insn
, 7, 12);
4532 TCGv cpu_dst
__attribute__((unused
)) = tcg_temp_new();
4534 if (xop
== 0x34) { /* FPU Operations */
4535 if (gen_trap_ifnofpu(dc
)) {
4538 gen_op_clear_ieee_excp_and_FTT();
4539 rs1
= GET_FIELD(insn
, 13, 17);
4540 rs2
= GET_FIELD(insn
, 27, 31);
4541 xop
= GET_FIELD(insn
, 18, 26);
4544 case 0x1: /* fmovs */
4545 cpu_src1_32
= gen_load_fpr_F(dc
, rs2
);
4546 gen_store_fpr_F(dc
, rd
, cpu_src1_32
);
4548 case 0x5: /* fnegs */
4549 gen_ne_fop_FF(dc
, rd
, rs2
, gen_helper_fnegs
);
4551 case 0x9: /* fabss */
4552 gen_ne_fop_FF(dc
, rd
, rs2
, gen_helper_fabss
);
4554 case 0x29: /* fsqrts */
4555 gen_fop_FF(dc
, rd
, rs2
, gen_helper_fsqrts
);
4557 case 0x2a: /* fsqrtd */
4558 gen_fop_DD(dc
, rd
, rs2
, gen_helper_fsqrtd
);
4560 case 0x2b: /* fsqrtq */
4561 CHECK_FPU_FEATURE(dc
, FLOAT128
);
4562 gen_fop_QQ(dc
, rd
, rs2
, gen_helper_fsqrtq
);
4564 case 0x41: /* fadds */
4565 gen_fop_FFF(dc
, rd
, rs1
, rs2
, gen_helper_fadds
);
4567 case 0x42: /* faddd */
4568 gen_fop_DDD(dc
, rd
, rs1
, rs2
, gen_helper_faddd
);
4570 case 0x43: /* faddq */
4571 CHECK_FPU_FEATURE(dc
, FLOAT128
);
4572 gen_fop_QQQ(dc
, rd
, rs1
, rs2
, gen_helper_faddq
);
4574 case 0x45: /* fsubs */
4575 gen_fop_FFF(dc
, rd
, rs1
, rs2
, gen_helper_fsubs
);
4577 case 0x46: /* fsubd */
4578 gen_fop_DDD(dc
, rd
, rs1
, rs2
, gen_helper_fsubd
);
4580 case 0x47: /* fsubq */
4581 CHECK_FPU_FEATURE(dc
, FLOAT128
);
4582 gen_fop_QQQ(dc
, rd
, rs1
, rs2
, gen_helper_fsubq
);
4584 case 0x49: /* fmuls */
4585 gen_fop_FFF(dc
, rd
, rs1
, rs2
, gen_helper_fmuls
);
4587 case 0x4a: /* fmuld */
4588 gen_fop_DDD(dc
, rd
, rs1
, rs2
, gen_helper_fmuld
);
4590 case 0x4b: /* fmulq */
4591 CHECK_FPU_FEATURE(dc
, FLOAT128
);
4592 gen_fop_QQQ(dc
, rd
, rs1
, rs2
, gen_helper_fmulq
);
4594 case 0x4d: /* fdivs */
4595 gen_fop_FFF(dc
, rd
, rs1
, rs2
, gen_helper_fdivs
);
4597 case 0x4e: /* fdivd */
4598 gen_fop_DDD(dc
, rd
, rs1
, rs2
, gen_helper_fdivd
);
4600 case 0x4f: /* fdivq */
4601 CHECK_FPU_FEATURE(dc
, FLOAT128
);
4602 gen_fop_QQQ(dc
, rd
, rs1
, rs2
, gen_helper_fdivq
);
4604 case 0x69: /* fsmuld */
4605 CHECK_FPU_FEATURE(dc
, FSMULD
);
4606 gen_fop_DFF(dc
, rd
, rs1
, rs2
, gen_helper_fsmuld
);
4608 case 0x6e: /* fdmulq */
4609 CHECK_FPU_FEATURE(dc
, FLOAT128
);
4610 gen_fop_QDD(dc
, rd
, rs1
, rs2
, gen_helper_fdmulq
);
4612 case 0xc4: /* fitos */
4613 gen_fop_FF(dc
, rd
, rs2
, gen_helper_fitos
);
4615 case 0xc6: /* fdtos */
4616 gen_fop_FD(dc
, rd
, rs2
, gen_helper_fdtos
);
4618 case 0xc7: /* fqtos */
4619 CHECK_FPU_FEATURE(dc
, FLOAT128
);
4620 gen_fop_FQ(dc
, rd
, rs2
, gen_helper_fqtos
);
4622 case 0xc8: /* fitod */
4623 gen_ne_fop_DF(dc
, rd
, rs2
, gen_helper_fitod
);
4625 case 0xc9: /* fstod */
4626 gen_ne_fop_DF(dc
, rd
, rs2
, gen_helper_fstod
);
4628 case 0xcb: /* fqtod */
4629 CHECK_FPU_FEATURE(dc
, FLOAT128
);
4630 gen_fop_DQ(dc
, rd
, rs2
, gen_helper_fqtod
);
4632 case 0xcc: /* fitoq */
4633 CHECK_FPU_FEATURE(dc
, FLOAT128
);
4634 gen_ne_fop_QF(dc
, rd
, rs2
, gen_helper_fitoq
);
4636 case 0xcd: /* fstoq */
4637 CHECK_FPU_FEATURE(dc
, FLOAT128
);
4638 gen_ne_fop_QF(dc
, rd
, rs2
, gen_helper_fstoq
);
4640 case 0xce: /* fdtoq */
4641 CHECK_FPU_FEATURE(dc
, FLOAT128
);
4642 gen_ne_fop_QD(dc
, rd
, rs2
, gen_helper_fdtoq
);
4644 case 0xd1: /* fstoi */
4645 gen_fop_FF(dc
, rd
, rs2
, gen_helper_fstoi
);
4647 case 0xd2: /* fdtoi */
4648 gen_fop_FD(dc
, rd
, rs2
, gen_helper_fdtoi
);
4650 case 0xd3: /* fqtoi */
4651 CHECK_FPU_FEATURE(dc
, FLOAT128
);
4652 gen_fop_FQ(dc
, rd
, rs2
, gen_helper_fqtoi
);
4654 #ifdef TARGET_SPARC64
4655 case 0x2: /* V9 fmovd */
4656 cpu_src1_64
= gen_load_fpr_D(dc
, rs2
);
4657 gen_store_fpr_D(dc
, rd
, cpu_src1_64
);
4659 case 0x3: /* V9 fmovq */
4660 CHECK_FPU_FEATURE(dc
, FLOAT128
);
4661 gen_move_Q(dc
, rd
, rs2
);
4663 case 0x6: /* V9 fnegd */
4664 gen_ne_fop_DD(dc
, rd
, rs2
, gen_helper_fnegd
);
4666 case 0x7: /* V9 fnegq */
4667 CHECK_FPU_FEATURE(dc
, FLOAT128
);
4668 gen_ne_fop_QQ(dc
, rd
, rs2
, gen_helper_fnegq
);
4670 case 0xa: /* V9 fabsd */
4671 gen_ne_fop_DD(dc
, rd
, rs2
, gen_helper_fabsd
);
4673 case 0xb: /* V9 fabsq */
4674 CHECK_FPU_FEATURE(dc
, FLOAT128
);
4675 gen_ne_fop_QQ(dc
, rd
, rs2
, gen_helper_fabsq
);
4677 case 0x81: /* V9 fstox */
4678 gen_fop_DF(dc
, rd
, rs2
, gen_helper_fstox
);
4680 case 0x82: /* V9 fdtox */
4681 gen_fop_DD(dc
, rd
, rs2
, gen_helper_fdtox
);
4683 case 0x83: /* V9 fqtox */
4684 CHECK_FPU_FEATURE(dc
, FLOAT128
);
4685 gen_fop_DQ(dc
, rd
, rs2
, gen_helper_fqtox
);
4687 case 0x84: /* V9 fxtos */
4688 gen_fop_FD(dc
, rd
, rs2
, gen_helper_fxtos
);
4690 case 0x88: /* V9 fxtod */
4691 gen_fop_DD(dc
, rd
, rs2
, gen_helper_fxtod
);
4693 case 0x8c: /* V9 fxtoq */
4694 CHECK_FPU_FEATURE(dc
, FLOAT128
);
4695 gen_ne_fop_QD(dc
, rd
, rs2
, gen_helper_fxtoq
);
4701 } else if (xop
== 0x35) { /* FPU Operations */
4702 #ifdef TARGET_SPARC64
4705 if (gen_trap_ifnofpu(dc
)) {
4708 gen_op_clear_ieee_excp_and_FTT();
4709 rs1
= GET_FIELD(insn
, 13, 17);
4710 rs2
= GET_FIELD(insn
, 27, 31);
4711 xop
= GET_FIELD(insn
, 18, 26);
4713 #ifdef TARGET_SPARC64
4717 cond = GET_FIELD_SP(insn, 10, 12); \
4718 cpu_src1 = get_src1(dc, insn); \
4719 gen_compare_reg(&cmp, cond, cpu_src1); \
4720 gen_fmov##sz(dc, &cmp, rd, rs2); \
4723 if ((xop
& 0x11f) == 0x005) { /* V9 fmovsr */
4726 } else if ((xop
& 0x11f) == 0x006) { // V9 fmovdr
4729 } else if ((xop
& 0x11f) == 0x007) { // V9 fmovqr
4730 CHECK_FPU_FEATURE(dc
, FLOAT128
);
4737 #ifdef TARGET_SPARC64
4738 #define FMOVCC(fcc, sz) \
4741 cond = GET_FIELD_SP(insn, 14, 17); \
4742 gen_fcompare(&cmp, fcc, cond); \
4743 gen_fmov##sz(dc, &cmp, rd, rs2); \
4746 case 0x001: /* V9 fmovscc %fcc0 */
4749 case 0x002: /* V9 fmovdcc %fcc0 */
4752 case 0x003: /* V9 fmovqcc %fcc0 */
4753 CHECK_FPU_FEATURE(dc
, FLOAT128
);
4756 case 0x041: /* V9 fmovscc %fcc1 */
4759 case 0x042: /* V9 fmovdcc %fcc1 */
4762 case 0x043: /* V9 fmovqcc %fcc1 */
4763 CHECK_FPU_FEATURE(dc
, FLOAT128
);
4766 case 0x081: /* V9 fmovscc %fcc2 */
4769 case 0x082: /* V9 fmovdcc %fcc2 */
4772 case 0x083: /* V9 fmovqcc %fcc2 */
4773 CHECK_FPU_FEATURE(dc
, FLOAT128
);
4776 case 0x0c1: /* V9 fmovscc %fcc3 */
4779 case 0x0c2: /* V9 fmovdcc %fcc3 */
4782 case 0x0c3: /* V9 fmovqcc %fcc3 */
4783 CHECK_FPU_FEATURE(dc
, FLOAT128
);
4787 #define FMOVCC(xcc, sz) \
4790 cond = GET_FIELD_SP(insn, 14, 17); \
4791 gen_compare(&cmp, xcc, cond, dc); \
4792 gen_fmov##sz(dc, &cmp, rd, rs2); \
4795 case 0x101: /* V9 fmovscc %icc */
4798 case 0x102: /* V9 fmovdcc %icc */
4801 case 0x103: /* V9 fmovqcc %icc */
4802 CHECK_FPU_FEATURE(dc
, FLOAT128
);
4805 case 0x181: /* V9 fmovscc %xcc */
4808 case 0x182: /* V9 fmovdcc %xcc */
4811 case 0x183: /* V9 fmovqcc %xcc */
4812 CHECK_FPU_FEATURE(dc
, FLOAT128
);
4817 case 0x51: /* fcmps, V9 %fcc */
4818 cpu_src1_32
= gen_load_fpr_F(dc
, rs1
);
4819 cpu_src2_32
= gen_load_fpr_F(dc
, rs2
);
4820 gen_op_fcmps(rd
& 3, cpu_src1_32
, cpu_src2_32
);
4822 case 0x52: /* fcmpd, V9 %fcc */
4823 cpu_src1_64
= gen_load_fpr_D(dc
, rs1
);
4824 cpu_src2_64
= gen_load_fpr_D(dc
, rs2
);
4825 gen_op_fcmpd(rd
& 3, cpu_src1_64
, cpu_src2_64
);
4827 case 0x53: /* fcmpq, V9 %fcc */
4828 CHECK_FPU_FEATURE(dc
, FLOAT128
);
4829 gen_op_load_fpr_QT0(QFPREG(rs1
));
4830 gen_op_load_fpr_QT1(QFPREG(rs2
));
4831 gen_op_fcmpq(rd
& 3);
4833 case 0x55: /* fcmpes, V9 %fcc */
4834 cpu_src1_32
= gen_load_fpr_F(dc
, rs1
);
4835 cpu_src2_32
= gen_load_fpr_F(dc
, rs2
);
4836 gen_op_fcmpes(rd
& 3, cpu_src1_32
, cpu_src2_32
);
4838 case 0x56: /* fcmped, V9 %fcc */
4839 cpu_src1_64
= gen_load_fpr_D(dc
, rs1
);
4840 cpu_src2_64
= gen_load_fpr_D(dc
, rs2
);
4841 gen_op_fcmped(rd
& 3, cpu_src1_64
, cpu_src2_64
);
4843 case 0x57: /* fcmpeq, V9 %fcc */
4844 CHECK_FPU_FEATURE(dc
, FLOAT128
);
4845 gen_op_load_fpr_QT0(QFPREG(rs1
));
4846 gen_op_load_fpr_QT1(QFPREG(rs2
));
4847 gen_op_fcmpeq(rd
& 3);
4852 } else if (xop
== 0x36) {
4853 #ifdef TARGET_SPARC64
4855 int opf
= GET_FIELD_SP(insn
, 5, 13);
4856 rs1
= GET_FIELD(insn
, 13, 17);
4857 rs2
= GET_FIELD(insn
, 27, 31);
4858 if (gen_trap_ifnofpu(dc
)) {
            case 0x000: /* VIS I edge8cc */
                CHECK_FPU_FEATURE(dc, VIS1);
                cpu_src1 = gen_load_gpr(dc, rs1);
                cpu_src2 = gen_load_gpr(dc, rs2);
                gen_edge(dc, cpu_dst, cpu_src1, cpu_src2, 8, 1, 0);
                gen_store_gpr(dc, rd, cpu_dst);
                break;
            case 0x001: /* VIS II edge8n */
                CHECK_FPU_FEATURE(dc, VIS2);
                cpu_src1 = gen_load_gpr(dc, rs1);
                cpu_src2 = gen_load_gpr(dc, rs2);
                gen_edge(dc, cpu_dst, cpu_src1, cpu_src2, 8, 0, 0);
                gen_store_gpr(dc, rd, cpu_dst);
                break;
            case 0x002: /* VIS I edge8lcc */
                CHECK_FPU_FEATURE(dc, VIS1);
                cpu_src1 = gen_load_gpr(dc, rs1);
                cpu_src2 = gen_load_gpr(dc, rs2);
                gen_edge(dc, cpu_dst, cpu_src1, cpu_src2, 8, 1, 1);
                gen_store_gpr(dc, rd, cpu_dst);
                break;
            case 0x003: /* VIS II edge8ln */
                CHECK_FPU_FEATURE(dc, VIS2);
                cpu_src1 = gen_load_gpr(dc, rs1);
                cpu_src2 = gen_load_gpr(dc, rs2);
                gen_edge(dc, cpu_dst, cpu_src1, cpu_src2, 8, 0, 1);
                gen_store_gpr(dc, rd, cpu_dst);
                break;
            case 0x004: /* VIS I edge16cc */
                CHECK_FPU_FEATURE(dc, VIS1);
                cpu_src1 = gen_load_gpr(dc, rs1);
                cpu_src2 = gen_load_gpr(dc, rs2);
                gen_edge(dc, cpu_dst, cpu_src1, cpu_src2, 16, 1, 0);
                gen_store_gpr(dc, rd, cpu_dst);
                break;
            case 0x005: /* VIS II edge16n */
                CHECK_FPU_FEATURE(dc, VIS2);
                cpu_src1 = gen_load_gpr(dc, rs1);
                cpu_src2 = gen_load_gpr(dc, rs2);
                gen_edge(dc, cpu_dst, cpu_src1, cpu_src2, 16, 0, 0);
                gen_store_gpr(dc, rd, cpu_dst);
                break;
            case 0x006: /* VIS I edge16lcc */
                CHECK_FPU_FEATURE(dc, VIS1);
                cpu_src1 = gen_load_gpr(dc, rs1);
                cpu_src2 = gen_load_gpr(dc, rs2);
                gen_edge(dc, cpu_dst, cpu_src1, cpu_src2, 16, 1, 1);
                gen_store_gpr(dc, rd, cpu_dst);
                break;
            case 0x007: /* VIS II edge16ln */
                CHECK_FPU_FEATURE(dc, VIS2);
                cpu_src1 = gen_load_gpr(dc, rs1);
                cpu_src2 = gen_load_gpr(dc, rs2);
                gen_edge(dc, cpu_dst, cpu_src1, cpu_src2, 16, 0, 1);
                gen_store_gpr(dc, rd, cpu_dst);
                break;
            case 0x008: /* VIS I edge32cc */
                CHECK_FPU_FEATURE(dc, VIS1);
                cpu_src1 = gen_load_gpr(dc, rs1);
                cpu_src2 = gen_load_gpr(dc, rs2);
                gen_edge(dc, cpu_dst, cpu_src1, cpu_src2, 32, 1, 0);
                gen_store_gpr(dc, rd, cpu_dst);
                break;
            case 0x009: /* VIS II edge32n */
                CHECK_FPU_FEATURE(dc, VIS2);
                cpu_src1 = gen_load_gpr(dc, rs1);
                cpu_src2 = gen_load_gpr(dc, rs2);
                gen_edge(dc, cpu_dst, cpu_src1, cpu_src2, 32, 0, 0);
                gen_store_gpr(dc, rd, cpu_dst);
                break;
            case 0x00a: /* VIS I edge32lcc */
                CHECK_FPU_FEATURE(dc, VIS1);
                cpu_src1 = gen_load_gpr(dc, rs1);
                cpu_src2 = gen_load_gpr(dc, rs2);
                gen_edge(dc, cpu_dst, cpu_src1, cpu_src2, 32, 1, 1);
                gen_store_gpr(dc, rd, cpu_dst);
                break;
            case 0x00b: /* VIS II edge32ln */
                CHECK_FPU_FEATURE(dc, VIS2);
                cpu_src1 = gen_load_gpr(dc, rs1);
                cpu_src2 = gen_load_gpr(dc, rs2);
                gen_edge(dc, cpu_dst, cpu_src1, cpu_src2, 32, 0, 1);
                gen_store_gpr(dc, rd, cpu_dst);
                break;
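            /*
             * Address-generation helpers follow: array8/16/32 compute
             * blocked-array addresses, alignaddr(l) records the low bits of
             * the sum in GSR.align for a later faligndata, and bmask stores
             * a byte-shuffle mask in the upper half of GSR (the deposit at
             * bit 32 below).  A typical guest pairing would be something
             * like "alignaddr %o0, %g0, %o1; faligndata %f0, %f2, %f4"
             * (illustrative only; not generated here).
             */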
            case 0x010: /* VIS I array8 */
                CHECK_FPU_FEATURE(dc, VIS1);
                cpu_src1 = gen_load_gpr(dc, rs1);
                cpu_src2 = gen_load_gpr(dc, rs2);
                gen_helper_array8(cpu_dst, cpu_src1, cpu_src2);
                gen_store_gpr(dc, rd, cpu_dst);
                break;
            case 0x012: /* VIS I array16 */
                CHECK_FPU_FEATURE(dc, VIS1);
                cpu_src1 = gen_load_gpr(dc, rs1);
                cpu_src2 = gen_load_gpr(dc, rs2);
                gen_helper_array8(cpu_dst, cpu_src1, cpu_src2);
                tcg_gen_shli_i64(cpu_dst, cpu_dst, 1);
                gen_store_gpr(dc, rd, cpu_dst);
                break;
            case 0x014: /* VIS I array32 */
                CHECK_FPU_FEATURE(dc, VIS1);
                cpu_src1 = gen_load_gpr(dc, rs1);
                cpu_src2 = gen_load_gpr(dc, rs2);
                gen_helper_array8(cpu_dst, cpu_src1, cpu_src2);
                tcg_gen_shli_i64(cpu_dst, cpu_dst, 2);
                gen_store_gpr(dc, rd, cpu_dst);
                break;
            case 0x018: /* VIS I alignaddr */
                CHECK_FPU_FEATURE(dc, VIS1);
                cpu_src1 = gen_load_gpr(dc, rs1);
                cpu_src2 = gen_load_gpr(dc, rs2);
                gen_alignaddr(cpu_dst, cpu_src1, cpu_src2, 0);
                gen_store_gpr(dc, rd, cpu_dst);
                break;
            case 0x01a: /* VIS I alignaddrl */
                CHECK_FPU_FEATURE(dc, VIS1);
                cpu_src1 = gen_load_gpr(dc, rs1);
                cpu_src2 = gen_load_gpr(dc, rs2);
                gen_alignaddr(cpu_dst, cpu_src1, cpu_src2, 1);
                gen_store_gpr(dc, rd, cpu_dst);
                break;
            case 0x019: /* VIS II bmask */
                CHECK_FPU_FEATURE(dc, VIS2);
                cpu_src1 = gen_load_gpr(dc, rs1);
                cpu_src2 = gen_load_gpr(dc, rs2);
                tcg_gen_add_tl(cpu_dst, cpu_src1, cpu_src2);
                tcg_gen_deposit_tl(cpu_gsr, cpu_gsr, cpu_dst, 32, 32);
                gen_store_gpr(dc, rd, cpu_dst);
                break;
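            /*
             * Partitioned compares: each 16-bit or 32-bit lane of the two
             * double-precision source registers is compared and the per-lane
             * results are returned as a small bitmask in the integer
             * destination register.
             */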
            case 0x020: /* VIS I fcmple16 */
                CHECK_FPU_FEATURE(dc, VIS1);
                cpu_src1_64 = gen_load_fpr_D(dc, rs1);
                cpu_src2_64 = gen_load_fpr_D(dc, rs2);
                gen_helper_fcmple16(cpu_dst, cpu_src1_64, cpu_src2_64);
                gen_store_gpr(dc, rd, cpu_dst);
                break;
            case 0x022: /* VIS I fcmpne16 */
                CHECK_FPU_FEATURE(dc, VIS1);
                cpu_src1_64 = gen_load_fpr_D(dc, rs1);
                cpu_src2_64 = gen_load_fpr_D(dc, rs2);
                gen_helper_fcmpne16(cpu_dst, cpu_src1_64, cpu_src2_64);
                gen_store_gpr(dc, rd, cpu_dst);
                break;
            case 0x024: /* VIS I fcmple32 */
                CHECK_FPU_FEATURE(dc, VIS1);
                cpu_src1_64 = gen_load_fpr_D(dc, rs1);
                cpu_src2_64 = gen_load_fpr_D(dc, rs2);
                gen_helper_fcmple32(cpu_dst, cpu_src1_64, cpu_src2_64);
                gen_store_gpr(dc, rd, cpu_dst);
                break;
            case 0x026: /* VIS I fcmpne32 */
                CHECK_FPU_FEATURE(dc, VIS1);
                cpu_src1_64 = gen_load_fpr_D(dc, rs1);
                cpu_src2_64 = gen_load_fpr_D(dc, rs2);
                gen_helper_fcmpne32(cpu_dst, cpu_src1_64, cpu_src2_64);
                gen_store_gpr(dc, rd, cpu_dst);
                break;
            case 0x028: /* VIS I fcmpgt16 */
                CHECK_FPU_FEATURE(dc, VIS1);
                cpu_src1_64 = gen_load_fpr_D(dc, rs1);
                cpu_src2_64 = gen_load_fpr_D(dc, rs2);
                gen_helper_fcmpgt16(cpu_dst, cpu_src1_64, cpu_src2_64);
                gen_store_gpr(dc, rd, cpu_dst);
                break;
            case 0x02a: /* VIS I fcmpeq16 */
                CHECK_FPU_FEATURE(dc, VIS1);
                cpu_src1_64 = gen_load_fpr_D(dc, rs1);
                cpu_src2_64 = gen_load_fpr_D(dc, rs2);
                gen_helper_fcmpeq16(cpu_dst, cpu_src1_64, cpu_src2_64);
                gen_store_gpr(dc, rd, cpu_dst);
                break;
            case 0x02c: /* VIS I fcmpgt32 */
                CHECK_FPU_FEATURE(dc, VIS1);
                cpu_src1_64 = gen_load_fpr_D(dc, rs1);
                cpu_src2_64 = gen_load_fpr_D(dc, rs2);
                gen_helper_fcmpgt32(cpu_dst, cpu_src1_64, cpu_src2_64);
                gen_store_gpr(dc, rd, cpu_dst);
                break;
            case 0x02e: /* VIS I fcmpeq32 */
                CHECK_FPU_FEATURE(dc, VIS1);
                cpu_src1_64 = gen_load_fpr_D(dc, rs1);
                cpu_src2_64 = gen_load_fpr_D(dc, rs2);
                gen_helper_fcmpeq32(cpu_dst, cpu_src1_64, cpu_src2_64);
                gen_store_gpr(dc, rd, cpu_dst);
                break;
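            /*
             * Partitioned multiplies and the pixel pack family.  The
             * fpack16/fpackfix results depend on the scale factor held in
             * GSR, which is why cpu_gsr is passed to those helpers.
             */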
            case 0x031: /* VIS I fmul8x16 */
                CHECK_FPU_FEATURE(dc, VIS1);
                gen_ne_fop_DDD(dc, rd, rs1, rs2, gen_helper_fmul8x16);
                break;
            case 0x033: /* VIS I fmul8x16au */
                CHECK_FPU_FEATURE(dc, VIS1);
                gen_ne_fop_DDD(dc, rd, rs1, rs2, gen_helper_fmul8x16au);
                break;
            case 0x035: /* VIS I fmul8x16al */
                CHECK_FPU_FEATURE(dc, VIS1);
                gen_ne_fop_DDD(dc, rd, rs1, rs2, gen_helper_fmul8x16al);
                break;
            case 0x036: /* VIS I fmul8sux16 */
                CHECK_FPU_FEATURE(dc, VIS1);
                gen_ne_fop_DDD(dc, rd, rs1, rs2, gen_helper_fmul8sux16);
                break;
            case 0x037: /* VIS I fmul8ulx16 */
                CHECK_FPU_FEATURE(dc, VIS1);
                gen_ne_fop_DDD(dc, rd, rs1, rs2, gen_helper_fmul8ulx16);
                break;
            case 0x038: /* VIS I fmuld8sux16 */
                CHECK_FPU_FEATURE(dc, VIS1);
                gen_ne_fop_DDD(dc, rd, rs1, rs2, gen_helper_fmuld8sux16);
                break;
            case 0x039: /* VIS I fmuld8ulx16 */
                CHECK_FPU_FEATURE(dc, VIS1);
                gen_ne_fop_DDD(dc, rd, rs1, rs2, gen_helper_fmuld8ulx16);
                break;
            case 0x03a: /* VIS I fpack32 */
                CHECK_FPU_FEATURE(dc, VIS1);
                gen_gsr_fop_DDD(dc, rd, rs1, rs2, gen_helper_fpack32);
                break;
            case 0x03b: /* VIS I fpack16 */
                CHECK_FPU_FEATURE(dc, VIS1);
                cpu_src1_64 = gen_load_fpr_D(dc, rs2);
                cpu_dst_32 = gen_dest_fpr_F(dc);
                gen_helper_fpack16(cpu_dst_32, cpu_gsr, cpu_src1_64);
                gen_store_fpr_F(dc, rd, cpu_dst_32);
                break;
            case 0x03d: /* VIS I fpackfix */
                CHECK_FPU_FEATURE(dc, VIS1);
                cpu_src1_64 = gen_load_fpr_D(dc, rs2);
                cpu_dst_32 = gen_dest_fpr_F(dc);
                gen_helper_fpackfix(cpu_dst_32, cpu_gsr, cpu_src1_64);
                gen_store_fpr_F(dc, rd, cpu_dst_32);
                break;
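            /*
             * pdist accumulates into its destination, hence the DDDD variant
             * that also reads rd; faligndata and bshuffle take an implicit
             * extra operand from GSR, hence the gsr_fop variants.
             */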
            case 0x03e: /* VIS I pdist */
                CHECK_FPU_FEATURE(dc, VIS1);
                gen_ne_fop_DDDD(dc, rd, rs1, rs2, gen_helper_pdist);
                break;
            case 0x048: /* VIS I faligndata */
                CHECK_FPU_FEATURE(dc, VIS1);
                gen_gsr_fop_DDD(dc, rd, rs1, rs2, gen_faligndata);
                break;
            case 0x04b: /* VIS I fpmerge */
                CHECK_FPU_FEATURE(dc, VIS1);
                gen_ne_fop_DDD(dc, rd, rs1, rs2, gen_helper_fpmerge);
                break;
            case 0x04c: /* VIS II bshuffle */
                CHECK_FPU_FEATURE(dc, VIS2);
                gen_gsr_fop_DDD(dc, rd, rs1, rs2, gen_helper_bshuffle);
                break;
            case 0x04d: /* VIS I fexpand */
                CHECK_FPU_FEATURE(dc, VIS1);
                gen_ne_fop_DDD(dc, rd, rs1, rs2, gen_helper_fexpand);
                break;
            case 0x050: /* VIS I fpadd16 */
                CHECK_FPU_FEATURE(dc, VIS1);
                gen_ne_fop_DDD(dc, rd, rs1, rs2, gen_helper_fpadd16);
                break;
            case 0x051: /* VIS I fpadd16s */
                CHECK_FPU_FEATURE(dc, VIS1);
                gen_ne_fop_FFF(dc, rd, rs1, rs2, gen_helper_fpadd16s);
                break;
            case 0x052: /* VIS I fpadd32 */
                CHECK_FPU_FEATURE(dc, VIS1);
                gen_ne_fop_DDD(dc, rd, rs1, rs2, gen_helper_fpadd32);
                break;
            case 0x053: /* VIS I fpadd32s */
                CHECK_FPU_FEATURE(dc, VIS1);
                gen_ne_fop_FFF(dc, rd, rs1, rs2, tcg_gen_add_i32);
                break;
            case 0x054: /* VIS I fpsub16 */
                CHECK_FPU_FEATURE(dc, VIS1);
                gen_ne_fop_DDD(dc, rd, rs1, rs2, gen_helper_fpsub16);
                break;
            case 0x055: /* VIS I fpsub16s */
                CHECK_FPU_FEATURE(dc, VIS1);
                gen_ne_fop_FFF(dc, rd, rs1, rs2, gen_helper_fpsub16s);
                break;
            case 0x056: /* VIS I fpsub32 */
                CHECK_FPU_FEATURE(dc, VIS1);
                gen_ne_fop_DDD(dc, rd, rs1, rs2, gen_helper_fpsub32);
                break;
            case 0x057: /* VIS I fpsub32s */
                CHECK_FPU_FEATURE(dc, VIS1);
                gen_ne_fop_FFF(dc, rd, rs1, rs2, tcg_gen_sub_i32);
                break;
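            /*
             * The VIS logical ops come in 64-bit (fzero, fnor, ...) and
             * 32-bit single-precision (fzeros, fnors, ...) forms; most map
             * directly onto native TCG operations on the FP registers, so no
             * helper call is needed.
             */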
            case 0x060: /* VIS I fzero */
                CHECK_FPU_FEATURE(dc, VIS1);
                cpu_dst_64 = gen_dest_fpr_D(dc, rd);
                tcg_gen_movi_i64(cpu_dst_64, 0);
                gen_store_fpr_D(dc, rd, cpu_dst_64);
                break;
            case 0x061: /* VIS I fzeros */
                CHECK_FPU_FEATURE(dc, VIS1);
                cpu_dst_32 = gen_dest_fpr_F(dc);
                tcg_gen_movi_i32(cpu_dst_32, 0);
                gen_store_fpr_F(dc, rd, cpu_dst_32);
                break;
            case 0x062: /* VIS I fnor */
                CHECK_FPU_FEATURE(dc, VIS1);
                gen_ne_fop_DDD(dc, rd, rs1, rs2, tcg_gen_nor_i64);
                break;
            case 0x063: /* VIS I fnors */
                CHECK_FPU_FEATURE(dc, VIS1);
                gen_ne_fop_FFF(dc, rd, rs1, rs2, tcg_gen_nor_i32);
                break;
            case 0x064: /* VIS I fandnot2 */
                CHECK_FPU_FEATURE(dc, VIS1);
                gen_ne_fop_DDD(dc, rd, rs1, rs2, tcg_gen_andc_i64);
                break;
            case 0x065: /* VIS I fandnot2s */
                CHECK_FPU_FEATURE(dc, VIS1);
                gen_ne_fop_FFF(dc, rd, rs1, rs2, tcg_gen_andc_i32);
                break;
            case 0x066: /* VIS I fnot2 */
                CHECK_FPU_FEATURE(dc, VIS1);
                gen_ne_fop_DD(dc, rd, rs2, tcg_gen_not_i64);
                break;
            case 0x067: /* VIS I fnot2s */
                CHECK_FPU_FEATURE(dc, VIS1);
                gen_ne_fop_FF(dc, rd, rs2, tcg_gen_not_i32);
                break;
            case 0x068: /* VIS I fandnot1 */
                CHECK_FPU_FEATURE(dc, VIS1);
                gen_ne_fop_DDD(dc, rd, rs2, rs1, tcg_gen_andc_i64);
                break;
            case 0x069: /* VIS I fandnot1s */
                CHECK_FPU_FEATURE(dc, VIS1);
                gen_ne_fop_FFF(dc, rd, rs2, rs1, tcg_gen_andc_i32);
                break;
            case 0x06a: /* VIS I fnot1 */
                CHECK_FPU_FEATURE(dc, VIS1);
                gen_ne_fop_DD(dc, rd, rs1, tcg_gen_not_i64);
                break;
            case 0x06b: /* VIS I fnot1s */
                CHECK_FPU_FEATURE(dc, VIS1);
                gen_ne_fop_FF(dc, rd, rs1, tcg_gen_not_i32);
                break;
            case 0x06c: /* VIS I fxor */
                CHECK_FPU_FEATURE(dc, VIS1);
                gen_ne_fop_DDD(dc, rd, rs1, rs2, tcg_gen_xor_i64);
                break;
            case 0x06d: /* VIS I fxors */
                CHECK_FPU_FEATURE(dc, VIS1);
                gen_ne_fop_FFF(dc, rd, rs1, rs2, tcg_gen_xor_i32);
                break;
            case 0x06e: /* VIS I fnand */
                CHECK_FPU_FEATURE(dc, VIS1);
                gen_ne_fop_DDD(dc, rd, rs1, rs2, tcg_gen_nand_i64);
                break;
            case 0x06f: /* VIS I fnands */
                CHECK_FPU_FEATURE(dc, VIS1);
                gen_ne_fop_FFF(dc, rd, rs1, rs2, tcg_gen_nand_i32);
                break;
            case 0x070: /* VIS I fand */
                CHECK_FPU_FEATURE(dc, VIS1);
                gen_ne_fop_DDD(dc, rd, rs1, rs2, tcg_gen_and_i64);
                break;
            case 0x071: /* VIS I fands */
                CHECK_FPU_FEATURE(dc, VIS1);
                gen_ne_fop_FFF(dc, rd, rs1, rs2, tcg_gen_and_i32);
                break;
            case 0x072: /* VIS I fxnor */
                CHECK_FPU_FEATURE(dc, VIS1);
                gen_ne_fop_DDD(dc, rd, rs1, rs2, tcg_gen_eqv_i64);
                break;
            case 0x073: /* VIS I fxnors */
                CHECK_FPU_FEATURE(dc, VIS1);
                gen_ne_fop_FFF(dc, rd, rs1, rs2, tcg_gen_eqv_i32);
                break;
            case 0x074: /* VIS I fsrc1 */
                CHECK_FPU_FEATURE(dc, VIS1);
                cpu_src1_64 = gen_load_fpr_D(dc, rs1);
                gen_store_fpr_D(dc, rd, cpu_src1_64);
                break;
            case 0x075: /* VIS I fsrc1s */
                CHECK_FPU_FEATURE(dc, VIS1);
                cpu_src1_32 = gen_load_fpr_F(dc, rs1);
                gen_store_fpr_F(dc, rd, cpu_src1_32);
                break;
            case 0x076: /* VIS I fornot2 */
                CHECK_FPU_FEATURE(dc, VIS1);
                gen_ne_fop_DDD(dc, rd, rs1, rs2, tcg_gen_orc_i64);
                break;
            case 0x077: /* VIS I fornot2s */
                CHECK_FPU_FEATURE(dc, VIS1);
                gen_ne_fop_FFF(dc, rd, rs1, rs2, tcg_gen_orc_i32);
                break;
            case 0x078: /* VIS I fsrc2 */
                CHECK_FPU_FEATURE(dc, VIS1);
                cpu_src1_64 = gen_load_fpr_D(dc, rs2);
                gen_store_fpr_D(dc, rd, cpu_src1_64);
                break;
            case 0x079: /* VIS I fsrc2s */
                CHECK_FPU_FEATURE(dc, VIS1);
                cpu_src1_32 = gen_load_fpr_F(dc, rs2);
                gen_store_fpr_F(dc, rd, cpu_src1_32);
                break;
            case 0x07a: /* VIS I fornot1 */
                CHECK_FPU_FEATURE(dc, VIS1);
                gen_ne_fop_DDD(dc, rd, rs2, rs1, tcg_gen_orc_i64);
                break;
            case 0x07b: /* VIS I fornot1s */
                CHECK_FPU_FEATURE(dc, VIS1);
                gen_ne_fop_FFF(dc, rd, rs2, rs1, tcg_gen_orc_i32);
                break;
            case 0x07c: /* VIS I for */
                CHECK_FPU_FEATURE(dc, VIS1);
                gen_ne_fop_DDD(dc, rd, rs1, rs2, tcg_gen_or_i64);
                break;
            case 0x07d: /* VIS I fors */
                CHECK_FPU_FEATURE(dc, VIS1);
                gen_ne_fop_FFF(dc, rd, rs1, rs2, tcg_gen_or_i32);
                break;
            case 0x07e: /* VIS I fone */
                CHECK_FPU_FEATURE(dc, VIS1);
                cpu_dst_64 = gen_dest_fpr_D(dc, rd);
                tcg_gen_movi_i64(cpu_dst_64, -1);
                gen_store_fpr_D(dc, rd, cpu_dst_64);
                break;
            case 0x07f: /* VIS I fones */
                CHECK_FPU_FEATURE(dc, VIS1);
                cpu_dst_32 = gen_dest_fpr_F(dc);
                tcg_gen_movi_i32(cpu_dst_32, -1);
                gen_store_fpr_F(dc, rd, cpu_dst_32);
                break;
            case 0x080: /* VIS I shutdown */
            case 0x081: /* VIS II siam */
                goto illegal_insn; /* in decodetree */
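    /*
     * Format 3 memory instructions.  The effective address is rs1 + rs2 or
     * rs1 + simm13, except for casa/casxa which use rs1 alone; the value
     * register is rd (and rd + 1 for the doubleword forms).
     */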
    case 3: /* load/store instructions */
        {
            unsigned int xop = GET_FIELD(insn, 7, 12);
            /* ??? gen_address_mask prevents us from using a source
               register directly.  Always generate a temporary. */
            TCGv cpu_addr = tcg_temp_new();

            tcg_gen_mov_tl(cpu_addr, get_src1(dc, insn));
            if (xop == 0x3c || xop == 0x3e) {
                /* V9 casa/casxa : no offset */
            } else if (IS_IMM) {     /* immediate */
                simm = GET_FIELDs(insn, 19, 31);
                if (simm != 0) {
                    tcg_gen_addi_tl(cpu_addr, cpu_addr, simm);
                }
            } else {            /* register */
                rs2 = GET_FIELD(insn, 27, 31);
                if (rs2 != 0) {
                    tcg_gen_add_tl(cpu_addr, cpu_addr, gen_load_gpr(dc, rs2));
                }
            }
            if (xop < 4 || (xop > 7 && xop < 0x14 && xop != 0x0e) ||
                (xop > 0x17 && xop <= 0x1d) ||
                (xop > 0x2c && xop <= 0x33) || xop == 0x1f || xop == 0x3d) {
                TCGv cpu_val = gen_dest_gpr(dc, rd);

                switch (xop) {
                case 0x0: /* ld, V9 lduw, load unsigned word */
                    gen_address_mask(dc, cpu_addr);
                    tcg_gen_qemu_ld_tl(cpu_val, cpu_addr,
                                       dc->mem_idx, MO_TEUL | MO_ALIGN);
                    break;
                case 0x1: /* ldub, load unsigned byte */
                    gen_address_mask(dc, cpu_addr);
                    tcg_gen_qemu_ld_tl(cpu_val, cpu_addr,
                                       dc->mem_idx, MO_UB);
                    break;
                case 0x2: /* lduh, load unsigned halfword */
                    gen_address_mask(dc, cpu_addr);
                    tcg_gen_qemu_ld_tl(cpu_val, cpu_addr,
                                       dc->mem_idx, MO_TEUW | MO_ALIGN);
                    break;
                case 0x3: /* ldd, load double word */
                    if (rd & 1) {
                        goto illegal_insn;
                    } else {
                        TCGv_i64 t64;

                        gen_address_mask(dc, cpu_addr);
                        t64 = tcg_temp_new_i64();
                        tcg_gen_qemu_ld_i64(t64, cpu_addr,
                                            dc->mem_idx, MO_TEUQ | MO_ALIGN);
                        tcg_gen_trunc_i64_tl(cpu_val, t64);
                        tcg_gen_ext32u_tl(cpu_val, cpu_val);
                        gen_store_gpr(dc, rd + 1, cpu_val);
                        tcg_gen_shri_i64(t64, t64, 32);
                        tcg_gen_trunc_i64_tl(cpu_val, t64);
                        tcg_gen_ext32u_tl(cpu_val, cpu_val);
                    }
                    break;
                case 0x9: /* ldsb, load signed byte */
                    gen_address_mask(dc, cpu_addr);
                    tcg_gen_qemu_ld_tl(cpu_val, cpu_addr, dc->mem_idx, MO_SB);
                    break;
                case 0xa: /* ldsh, load signed halfword */
                    gen_address_mask(dc, cpu_addr);
                    tcg_gen_qemu_ld_tl(cpu_val, cpu_addr,
                                       dc->mem_idx, MO_TESW | MO_ALIGN);
                    break;
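                /*
                 * ldstub and swap are read-modify-write accesses; the
                 * gen_ldstub/gen_swap helpers emit TCG atomic operations so
                 * the update stays atomic even with parallel vCPUs.
                 */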
                case 0xd: /* ldstub */
                    gen_ldstub(dc, cpu_val, cpu_addr, dc->mem_idx);
                    break;
                case 0xf:
                    /* swap, swap register with memory. Also atomically */
                    cpu_src1 = gen_load_gpr(dc, rd);
                    gen_swap(dc, cpu_val, cpu_src1, cpu_addr,
                             dc->mem_idx, MO_TEUL);
                    break;
#if !defined(CONFIG_USER_ONLY) || defined(TARGET_SPARC64)
                case 0x10: /* lda, V9 lduwa, load word alternate */
                    gen_ld_asi(dc, cpu_val, cpu_addr, insn, MO_TEUL);
                    break;
                case 0x11: /* lduba, load unsigned byte alternate */
                    gen_ld_asi(dc, cpu_val, cpu_addr, insn, MO_UB);
                    break;
                case 0x12: /* lduha, load unsigned halfword alternate */
                    gen_ld_asi(dc, cpu_val, cpu_addr, insn, MO_TEUW);
                    break;
                case 0x13: /* ldda, load double word alternate */
                    if (rd & 1) {
                        goto illegal_insn;
                    }
                    gen_ldda_asi(dc, cpu_addr, insn, rd);
                    goto skip_move;
                case 0x19: /* ldsba, load signed byte alternate */
                    gen_ld_asi(dc, cpu_val, cpu_addr, insn, MO_SB);
                    break;
                case 0x1a: /* ldsha, load signed halfword alternate */
                    gen_ld_asi(dc, cpu_val, cpu_addr, insn, MO_TESW);
                    break;
                case 0x1d: /* ldstuba -- XXX: should be atomically */
                    gen_ldstub_asi(dc, cpu_val, cpu_addr, insn);
                    break;
                case 0x1f: /* swapa, swap reg with alt. memory. Also
                              atomically */
                    cpu_src1 = gen_load_gpr(dc, rd);
                    gen_swap_asi(dc, cpu_val, cpu_src1, cpu_addr, insn);
                    break;
#endif
#ifdef TARGET_SPARC64
                case 0x08: /* V9 ldsw */
                    gen_address_mask(dc, cpu_addr);
                    tcg_gen_qemu_ld_tl(cpu_val, cpu_addr,
                                       dc->mem_idx, MO_TESL | MO_ALIGN);
                    break;
                case 0x0b: /* V9 ldx */
                    gen_address_mask(dc, cpu_addr);
                    tcg_gen_qemu_ld_tl(cpu_val, cpu_addr,
                                       dc->mem_idx, MO_TEUQ | MO_ALIGN);
                    break;
                case 0x18: /* V9 ldswa */
                    gen_ld_asi(dc, cpu_val, cpu_addr, insn, MO_TESL);
                    break;
                case 0x1b: /* V9 ldxa */
                    gen_ld_asi(dc, cpu_val, cpu_addr, insn, MO_TEUQ);
                    break;
                case 0x2d: /* V9 prefetch, no effect */
                    goto skip_move;
                case 0x30: /* V9 ldfa */
                    if (gen_trap_ifnofpu(dc)) {
                        goto jmp_insn;
                    }
                    gen_ldf_asi(dc, cpu_addr, insn, 4, rd);
                    gen_update_fprs_dirty(dc, rd);
                    goto skip_move;
                case 0x33: /* V9 lddfa */
                    if (gen_trap_ifnofpu(dc)) {
                        goto jmp_insn;
                    }
                    gen_ldf_asi(dc, cpu_addr, insn, 8, DFPREG(rd));
                    gen_update_fprs_dirty(dc, DFPREG(rd));
                    goto skip_move;
                case 0x3d: /* V9 prefetcha, no effect */
                    goto skip_move;
                case 0x32: /* V9 ldqfa */
                    CHECK_FPU_FEATURE(dc, FLOAT128);
                    if (gen_trap_ifnofpu(dc)) {
                        goto jmp_insn;
                    }
                    gen_ldf_asi(dc, cpu_addr, insn, 16, QFPREG(rd));
                    gen_update_fprs_dirty(dc, QFPREG(rd));
                    goto skip_move;
#endif
                default:
                    goto illegal_insn;
                }
                gen_store_gpr(dc, rd, cpu_val);
#if !defined(CONFIG_USER_ONLY) || defined(TARGET_SPARC64)
            skip_move: ;
#endif
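            /*
             * xop 0x20-0x23 are the floating-point loads: they reuse the
             * address computed above but target the FP register file and
             * FSR rather than the integer destination.
             */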
            } else if (xop >= 0x20 && xop < 0x24) {
                if (gen_trap_ifnofpu(dc)) {
                    goto jmp_insn;
                }
                switch (xop) {
                case 0x20: /* ldf, load fpreg */
                    gen_address_mask(dc, cpu_addr);
                    cpu_dst_32 = gen_dest_fpr_F(dc);
                    tcg_gen_qemu_ld_i32(cpu_dst_32, cpu_addr,
                                        dc->mem_idx, MO_TEUL | MO_ALIGN);
                    gen_store_fpr_F(dc, rd, cpu_dst_32);
                    break;
                case 0x21: /* ldfsr, V9 ldxfsr */
#ifdef TARGET_SPARC64
                    gen_address_mask(dc, cpu_addr);
                    if (rd == 1) {
                        TCGv_i64 t64 = tcg_temp_new_i64();
                        tcg_gen_qemu_ld_i64(t64, cpu_addr,
                                            dc->mem_idx, MO_TEUQ | MO_ALIGN);
                        gen_helper_ldxfsr(cpu_fsr, tcg_env, cpu_fsr, t64);
                        break;
                    }
#endif
                    cpu_dst_32 = tcg_temp_new_i32();
                    tcg_gen_qemu_ld_i32(cpu_dst_32, cpu_addr,
                                        dc->mem_idx, MO_TEUL | MO_ALIGN);
                    gen_helper_ldfsr(cpu_fsr, tcg_env, cpu_fsr, cpu_dst_32);
                    break;
                case 0x22: /* ldqf, load quad fpreg */
                    CHECK_FPU_FEATURE(dc, FLOAT128);
                    gen_address_mask(dc, cpu_addr);
                    cpu_src1_64 = tcg_temp_new_i64();
                    tcg_gen_qemu_ld_i64(cpu_src1_64, cpu_addr, dc->mem_idx,
                                        MO_TEUQ | MO_ALIGN_4);
                    tcg_gen_addi_tl(cpu_addr, cpu_addr, 8);
                    cpu_src2_64 = tcg_temp_new_i64();
                    tcg_gen_qemu_ld_i64(cpu_src2_64, cpu_addr, dc->mem_idx,
                                        MO_TEUQ | MO_ALIGN_4);
                    gen_store_fpr_Q(dc, rd, cpu_src1_64, cpu_src2_64);
                    break;
                case 0x23: /* lddf, load double fpreg */
                    gen_address_mask(dc, cpu_addr);
                    cpu_dst_64 = gen_dest_fpr_D(dc, rd);
                    tcg_gen_qemu_ld_i64(cpu_dst_64, cpu_addr, dc->mem_idx,
                                        MO_TEUQ | MO_ALIGN_4);
                    gen_store_fpr_D(dc, rd, cpu_dst_64);
                    break;
                default:
                    goto illegal_insn;
                }
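            /*
             * Integer stores mirror the load cases above: the value comes
             * from rd (plus rd + 1 for std), with the same ASI variants on
             * system and 64-bit configurations.
             */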
            } else if (xop < 8 || (xop >= 0x14 && xop < 0x18) ||
                       xop == 0xe || xop == 0x1e) {
                TCGv cpu_val = gen_load_gpr(dc, rd);

                switch (xop) {
                case 0x4: /* st, store word */
                    gen_address_mask(dc, cpu_addr);
                    tcg_gen_qemu_st_tl(cpu_val, cpu_addr,
                                       dc->mem_idx, MO_TEUL | MO_ALIGN);
                    break;
                case 0x5: /* stb, store byte */
                    gen_address_mask(dc, cpu_addr);
                    tcg_gen_qemu_st_tl(cpu_val, cpu_addr, dc->mem_idx, MO_UB);
                    break;
                case 0x6: /* sth, store halfword */
                    gen_address_mask(dc, cpu_addr);
                    tcg_gen_qemu_st_tl(cpu_val, cpu_addr,
                                       dc->mem_idx, MO_TEUW | MO_ALIGN);
                    break;
                case 0x7: /* std, store double word */
                    if (rd & 1) {
                        goto illegal_insn;
                    } else {
                        TCGv_i64 t64;
                        TCGv lo;

                        gen_address_mask(dc, cpu_addr);
                        lo = gen_load_gpr(dc, rd + 1);
                        t64 = tcg_temp_new_i64();
                        tcg_gen_concat_tl_i64(t64, lo, cpu_val);
                        tcg_gen_qemu_st_i64(t64, cpu_addr,
                                            dc->mem_idx, MO_TEUQ | MO_ALIGN);
                    }
                    break;
#if !defined(CONFIG_USER_ONLY) || defined(TARGET_SPARC64)
                case 0x14: /* sta, V9 stwa, store word alternate */
                    gen_st_asi(dc, cpu_val, cpu_addr, insn, MO_TEUL);
                    break;
                case 0x15: /* stba, store byte alternate */
                    gen_st_asi(dc, cpu_val, cpu_addr, insn, MO_UB);
                    break;
                case 0x16: /* stha, store halfword alternate */
                    gen_st_asi(dc, cpu_val, cpu_addr, insn, MO_TEUW);
                    break;
                case 0x17: /* stda, store double word alternate */
                    if (rd & 1) {
                        goto illegal_insn;
                    }
                    gen_stda_asi(dc, cpu_val, cpu_addr, insn, rd);
                    break;
#endif
#ifdef TARGET_SPARC64
                case 0x0e: /* V9 stx */
                    gen_address_mask(dc, cpu_addr);
                    tcg_gen_qemu_st_tl(cpu_val, cpu_addr,
                                       dc->mem_idx, MO_TEUQ | MO_ALIGN);
                    break;
                case 0x1e: /* V9 stxa */
                    gen_st_asi(dc, cpu_val, cpu_addr, insn, MO_TEUQ);
                    break;
#endif
                default:
                    goto illegal_insn;
                }
            } else if (xop > 0x23 && xop < 0x28) {
                if (gen_trap_ifnofpu(dc)) {
                    goto jmp_insn;
                }
                switch (xop) {
                case 0x24: /* stf, store fpreg */
                    gen_address_mask(dc, cpu_addr);
                    cpu_src1_32 = gen_load_fpr_F(dc, rd);
                    tcg_gen_qemu_st_i32(cpu_src1_32, cpu_addr,
                                        dc->mem_idx, MO_TEUL | MO_ALIGN);
                    break;
                case 0x25: /* stfsr, V9 stxfsr */
#ifdef TARGET_SPARC64
                    gen_address_mask(dc, cpu_addr);
                    if (rd == 1) {
                        tcg_gen_qemu_st_tl(cpu_fsr, cpu_addr,
                                           dc->mem_idx, MO_TEUQ | MO_ALIGN);
                        break;
                    }
#endif
                    tcg_gen_qemu_st_tl(cpu_fsr, cpu_addr,
                                       dc->mem_idx, MO_TEUL | MO_ALIGN);
                    break;
                case 0x26:
#ifdef TARGET_SPARC64
                    /* V9 stqf, store quad fpreg */
                    CHECK_FPU_FEATURE(dc, FLOAT128);
                    gen_address_mask(dc, cpu_addr);
                    /* ??? While stqf only requires 4-byte alignment, it is
                       legal for the cpu to signal the unaligned exception.
                       The OS trap handler is then required to fix it up.
                       For qemu, this avoids having to probe the second page
                       before performing the first write. */
                    cpu_src1_64 = gen_load_fpr_Q0(dc, rd);
                    tcg_gen_qemu_st_i64(cpu_src1_64, cpu_addr,
                                        dc->mem_idx, MO_TEUQ | MO_ALIGN_16);
                    tcg_gen_addi_tl(cpu_addr, cpu_addr, 8);
                    cpu_src2_64 = gen_load_fpr_Q1(dc, rd);
                    tcg_gen_qemu_st_i64(cpu_src2_64, cpu_addr,
                                        dc->mem_idx, MO_TEUQ);
                    break;
#else /* !TARGET_SPARC64 */
                    /* stdfq, store floating point queue */
#if defined(CONFIG_USER_ONLY)
                    goto illegal_insn;
#else
                    if (!supervisor(dc))
                        goto priv_insn;
                    if (gen_trap_ifnofpu(dc)) {
                        goto jmp_insn;
                    }
                    goto nfq_insn;
#endif
#endif
                case 0x27: /* stdf, store double fpreg */
                    gen_address_mask(dc, cpu_addr);
                    cpu_src1_64 = gen_load_fpr_D(dc, rd);
                    tcg_gen_qemu_st_i64(cpu_src1_64, cpu_addr, dc->mem_idx,
                                        MO_TEUQ | MO_ALIGN_4);
                    break;
                default:
                    goto illegal_insn;
                }
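            /*
             * xop 0x34-0x3e cover the ASI forms of the FP stores together
             * with compare-and-swap; casa is also accepted on LEON3, which
             * is why the non-V9 path checks the CASA feature bit.
             */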
            } else if (xop > 0x33 && xop < 0x3f) {
                switch (xop) {
#ifdef TARGET_SPARC64
                case 0x34: /* V9 stfa */
                    if (gen_trap_ifnofpu(dc)) {
                        goto jmp_insn;
                    }
                    gen_stf_asi(dc, cpu_addr, insn, 4, rd);
                    break;
                case 0x36: /* V9 stqfa */
                    CHECK_FPU_FEATURE(dc, FLOAT128);
                    if (gen_trap_ifnofpu(dc)) {
                        goto jmp_insn;
                    }
                    gen_stf_asi(dc, cpu_addr, insn, 16, QFPREG(rd));
                    break;
                case 0x37: /* V9 stdfa */
                    if (gen_trap_ifnofpu(dc)) {
                        goto jmp_insn;
                    }
                    gen_stf_asi(dc, cpu_addr, insn, 8, DFPREG(rd));
                    break;
                case 0x3e: /* V9 casxa */
                    rs2 = GET_FIELD(insn, 27, 31);
                    cpu_src2 = gen_load_gpr(dc, rs2);
                    gen_casx_asi(dc, cpu_addr, cpu_src2, insn, rd);
                    break;
#endif
#if !defined(CONFIG_USER_ONLY) || defined(TARGET_SPARC64)
                case 0x3c: /* V9 or LEON3 casa */
#ifndef TARGET_SPARC64
                    CHECK_IU_FEATURE(dc, CASA);
#endif
                    rs2 = GET_FIELD(insn, 27, 31);
                    cpu_src2 = gen_load_gpr(dc, rs2);
                    gen_cas_asi(dc, cpu_addr, cpu_src2, insn, rd);
                    break;
#endif
                default:
                    goto illegal_insn;
                }
            } else {
                goto illegal_insn;
            }
        }
        break;
    }
 illegal_insn:
    gen_exception(dc, TT_ILL_INSN);
    return;
#if !defined(CONFIG_USER_ONLY) && !defined(TARGET_SPARC64)
 priv_insn:
    gen_exception(dc, TT_PRIV_INSN);
    return;
#endif
 nfpu_insn:
    gen_op_fpexception_im(dc, FSR_FTT_UNIMPFPOP);
    return;
#if !defined(CONFIG_USER_ONLY) && !defined(TARGET_SPARC64)
 nfq_insn:
    gen_op_fpexception_im(dc, FSR_FTT_SEQ_ERROR);
    return;
#endif
 jmp_insn:
    return;
}
static void sparc_tr_init_disas_context(DisasContextBase *dcbase, CPUState *cs)
{
    DisasContext *dc = container_of(dcbase, DisasContext, base);
    CPUSPARCState *env = cpu_env(cs);
    int bound;

    dc->pc = dc->base.pc_first;
    dc->npc = (target_ulong)dc->base.tb->cs_base;
    dc->cc_op = CC_OP_DYNAMIC;
    dc->mem_idx = dc->base.tb->flags & TB_FLAG_MMU_MASK;
    dc->def = &env->def;
    dc->fpu_enabled = tb_fpu_enabled(dc->base.tb->flags);
    dc->address_mask_32bit = tb_am_enabled(dc->base.tb->flags);
#ifndef CONFIG_USER_ONLY
    dc->supervisor = (dc->base.tb->flags & TB_FLAG_SUPER) != 0;
#endif
#ifdef TARGET_SPARC64
    dc->asi = (dc->base.tb->flags >> TB_FLAG_ASI_SHIFT) & 0xff;
#ifndef CONFIG_USER_ONLY
    dc->hypervisor = (dc->base.tb->flags & TB_FLAG_HYPER) != 0;
#endif
#endif
    /*
     * if we reach a page boundary, we stop generation so that the
     * PC of a TT_TFAULT exception is always in the right page
     */
    bound = -(dc->base.pc_first | TARGET_PAGE_MASK) / 4;
    dc->base.max_insns = MIN(dc->base.max_insns, bound);
}
static void sparc_tr_tb_start(DisasContextBase *db, CPUState *cs)
{
}
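/*
 * dc->pc and dc->npc hold either a real (4-byte aligned) target address or
 * one of the dynamic markers in their low two bits (DYNAMIC_PC,
 * DYNAMIC_PC_LOOKUP, and additionally JUMP_PC for npc); insn_start and
 * tb_stop below decode that encoding.
 */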
static void sparc_tr_insn_start(DisasContextBase *dcbase, CPUState *cs)
{
    DisasContext *dc = container_of(dcbase, DisasContext, base);
    target_ulong npc = dc->npc;

    if (npc & 3) {
        switch (npc) {
        case JUMP_PC:
            assert(dc->jump_pc[1] == dc->pc + 4);
            npc = dc->jump_pc[0] | JUMP_PC;
            break;
        case DYNAMIC_PC:
        case DYNAMIC_PC_LOOKUP:
            npc = DYNAMIC_PC;
            break;
        default:
            g_assert_not_reached();
        }
    }
    tcg_gen_insn_start(dc->pc, npc);
}
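/*
 * Instructions already converted to decodetree are handled by decode();
 * anything it rejects falls through to the legacy switch-based decoder.
 */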
static void sparc_tr_translate_insn(DisasContextBase *dcbase, CPUState *cs)
{
    DisasContext *dc = container_of(dcbase, DisasContext, base);
    CPUSPARCState *env = cpu_env(cs);
    unsigned int insn;

    insn = translator_ldl(env, &dc->base, dc->pc);
    dc->base.pc_next += 4;

    if (!decode(dc, insn)) {
        disas_sparc_legacy(dc, insn);
    }

    if (dc->base.is_jmp == DISAS_NORETURN) {
        return;
    }
    if (dc->pc != dc->base.pc_next) {
        dc->base.is_jmp = DISAS_TOO_MANY;
    }
}
static void sparc_tr_tb_stop(DisasContextBase *dcbase, CPUState *cs)
{
    DisasContext *dc = container_of(dcbase, DisasContext, base);
    DisasDelayException *e, *e_next;
    bool may_lookup;

    switch (dc->base.is_jmp) {
    case DISAS_NEXT:
    case DISAS_TOO_MANY:
        if (((dc->pc | dc->npc) & 3) == 0) {
            /* static PC and NPC: we can use direct chaining */
            gen_goto_tb(dc, 0, dc->pc, dc->npc);
            break;
        }

        may_lookup = true;
        if (dc->pc & 3) {
            switch (dc->pc) {
            case DYNAMIC_PC_LOOKUP:
                break;
            case DYNAMIC_PC:
                may_lookup = false;
                break;
            default:
                g_assert_not_reached();
            }
        } else {
            tcg_gen_movi_tl(cpu_pc, dc->pc);
        }

        if (dc->npc & 3) {
            switch (dc->npc) {
            case JUMP_PC:
                gen_generic_branch(dc);
                break;
            case DYNAMIC_PC:
                may_lookup = false;
                break;
            case DYNAMIC_PC_LOOKUP:
                break;
            default:
                g_assert_not_reached();
            }
        } else {
            tcg_gen_movi_tl(cpu_npc, dc->npc);
        }
        if (may_lookup) {
            tcg_gen_lookup_and_goto_ptr();
        } else {
            tcg_gen_exit_tb(NULL, 0);
        }
        break;

    case DISAS_NORETURN:
        break;

    case DISAS_EXIT:
        /* Exit TB */
        save_state(dc);
        tcg_gen_exit_tb(NULL, 0);
        break;

    default:
        g_assert_not_reached();
    }

    for (e = dc->delay_excp_list; e ; e = e_next) {
        gen_set_label(e->lab);

        tcg_gen_movi_tl(cpu_pc, e->pc);
        if (e->npc % 4 == 0) {
            tcg_gen_movi_tl(cpu_npc, e->npc);
        }
        gen_helper_raise_exception(tcg_env, e->excp);

        e_next = e->next;
        g_free(e);
    }
}
static void sparc_tr_disas_log(const DisasContextBase *dcbase,
                               CPUState *cpu, FILE *logfile)
{
    fprintf(logfile, "IN: %s\n", lookup_symbol(dcbase->pc_first));
    target_disas(logfile, cpu, dcbase->pc_first, dcbase->tb->size);
}
static const TranslatorOps sparc_tr_ops = {
    .init_disas_context = sparc_tr_init_disas_context,
    .tb_start           = sparc_tr_tb_start,
    .insn_start         = sparc_tr_insn_start,
    .translate_insn     = sparc_tr_translate_insn,
    .tb_stop            = sparc_tr_tb_stop,
    .disas_log          = sparc_tr_disas_log,
};
void gen_intermediate_code(CPUState *cs, TranslationBlock *tb, int *max_insns,
                           target_ulong pc, void *host_pc)
{
    DisasContext dc = {};

    translator_loop(cs, tb, max_insns, pc, host_pc, &sparc_tr_ops, &dc.base);
}
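/*
 * Allocate the TCG globals backing the guest state.  %g0 is deliberately
 * not allocated: loads of it produce constant zero and stores to it are
 * discarded, so only cpu_regs[1..31] are real globals.
 */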
void sparc_tcg_init(void)
{
    static const char gregnames[32][4] = {
        "g0", "g1", "g2", "g3", "g4", "g5", "g6", "g7",
        "o0", "o1", "o2", "o3", "o4", "o5", "o6", "o7",
        "l0", "l1", "l2", "l3", "l4", "l5", "l6", "l7",
        "i0", "i1", "i2", "i3", "i4", "i5", "i6", "i7",
    };
    static const char fregnames[32][4] = {
        "f0", "f2", "f4", "f6", "f8", "f10", "f12", "f14",
        "f16", "f18", "f20", "f22", "f24", "f26", "f28", "f30",
        "f32", "f34", "f36", "f38", "f40", "f42", "f44", "f46",
        "f48", "f50", "f52", "f54", "f56", "f58", "f60", "f62",
    };

    static const struct { TCGv_i32 *ptr; int off; const char *name; } r32[] = {
#ifdef TARGET_SPARC64
        { &cpu_xcc, offsetof(CPUSPARCState, xcc), "xcc" },
        { &cpu_fprs, offsetof(CPUSPARCState, fprs), "fprs" },
#endif
        { &cpu_cc_op, offsetof(CPUSPARCState, cc_op), "cc_op" },
        { &cpu_psr, offsetof(CPUSPARCState, psr), "psr" },
    };

    static const struct { TCGv *ptr; int off; const char *name; } rtl[] = {
#ifdef TARGET_SPARC64
        { &cpu_gsr, offsetof(CPUSPARCState, gsr), "gsr" },
#endif
        { &cpu_cond, offsetof(CPUSPARCState, cond), "cond" },
        { &cpu_cc_src, offsetof(CPUSPARCState, cc_src), "cc_src" },
        { &cpu_cc_src2, offsetof(CPUSPARCState, cc_src2), "cc_src2" },
        { &cpu_cc_dst, offsetof(CPUSPARCState, cc_dst), "cc_dst" },
        { &cpu_fsr, offsetof(CPUSPARCState, fsr), "fsr" },
        { &cpu_pc, offsetof(CPUSPARCState, pc), "pc" },
        { &cpu_npc, offsetof(CPUSPARCState, npc), "npc" },
        { &cpu_y, offsetof(CPUSPARCState, y), "y" },
        { &cpu_tbr, offsetof(CPUSPARCState, tbr), "tbr" },
    };

    unsigned int i;

    cpu_regwptr = tcg_global_mem_new_ptr(tcg_env,
                                         offsetof(CPUSPARCState, regwptr),
                                         "regwptr");

    for (i = 0; i < ARRAY_SIZE(r32); ++i) {
        *r32[i].ptr = tcg_global_mem_new_i32(tcg_env, r32[i].off, r32[i].name);
    }

    for (i = 0; i < ARRAY_SIZE(rtl); ++i) {
        *rtl[i].ptr = tcg_global_mem_new(tcg_env, rtl[i].off, rtl[i].name);
    }

    cpu_regs[0] = NULL;
    for (i = 1; i < 8; ++i) {
        cpu_regs[i] = tcg_global_mem_new(tcg_env,
                                         offsetof(CPUSPARCState, gregs[i]),
                                         gregnames[i]);
    }

    for (i = 8; i < 32; ++i) {
        cpu_regs[i] = tcg_global_mem_new(cpu_regwptr,
                                         (i - 8) * sizeof(target_ulong),
                                         gregnames[i]);
    }

    for (i = 0; i < TARGET_DPREGS; i++) {
        cpu_fpr[i] = tcg_global_mem_new_i64(tcg_env,
                                            offsetof(CPUSPARCState, fpr[i]),
                                            fregnames[i]);
    }
}
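/*
 * Recover the architectural pc/npc pair after an exception, using the
 * values recorded for each instruction by sparc_tr_insn_start above.
 */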
void sparc_restore_state_to_opc(CPUState *cs,
                                const TranslationBlock *tb,
                                const uint64_t *data)
{
    SPARCCPU *cpu = SPARC_CPU(cs);
    CPUSPARCState *env = &cpu->env;
    target_ulong pc = data[0];
    target_ulong npc = data[1];

    env->pc = pc;
    if (npc == DYNAMIC_PC) {
        /* dynamic NPC: already stored */
    } else if (npc & JUMP_PC) {
        /* jump PC: use 'cond' and the jump targets of the translation */
        if (env->cond) {
            env->npc = npc & ~3;